Print this page
Import some changes from FreeBSD (details later, this is quick-n-dirty for now).
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_common.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_common.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2012, Intel Corporation
3 + Copyright (c) 2001-2013, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_common.h"
36 36 #include "ixgbe_phy.h"
37 37 #include "ixgbe_api.h"
38 38
39 39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 45 u16 count);
46 46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 50
51 51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 53 u16 *san_mac_offset);
54 54 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 55 u16 words, u16 *data);
56 56 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 57 u16 words, u16 *data);
58 58 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 59 u16 offset);
60 60
61 61 /**
62 62 * ixgbe_init_ops_generic - Inits function ptrs
63 63 * @hw: pointer to the hardware structure
64 64 *
65 65 * Initialize the function pointers.
66 66 **/
67 67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
68 68 {
69 69 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 70 struct ixgbe_mac_info *mac = &hw->mac;
71 71 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
72 72
73 73 DEBUGFUNC("ixgbe_init_ops_generic");
74 74
75 75 /* EEPROM */
76 76 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
77 77 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 78 if (eec & IXGBE_EEC_PRES) {
79 79 eeprom->ops.read = &ixgbe_read_eerd_generic;
80 80 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 81 } else {
82 82 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 83 eeprom->ops.read_buffer =
84 84 &ixgbe_read_eeprom_buffer_bit_bang_generic;
85 85 }
86 86 eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 87 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 88 eeprom->ops.validate_checksum =
89 89 &ixgbe_validate_eeprom_checksum_generic;
90 90 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 91 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
92 92
93 93 /* MAC */
94 94 mac->ops.init_hw = &ixgbe_init_hw_generic;
95 95 mac->ops.reset_hw = NULL;
96 96 mac->ops.start_hw = &ixgbe_start_hw_generic;
97 97 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 98 mac->ops.get_media_type = NULL;
99 99 mac->ops.get_supported_physical_layer = NULL;
100 100 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 101 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 102 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 103 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 104 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 105 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 106 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
107 107
108 108 /* LEDs */
109 109 mac->ops.led_on = &ixgbe_led_on_generic;
110 110 mac->ops.led_off = &ixgbe_led_off_generic;
111 111 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 112 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
113 113
114 114 /* RAR, Multicast, VLAN */
115 115 mac->ops.set_rar = &ixgbe_set_rar_generic;
116 116 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
117 117 mac->ops.insert_mac_addr = NULL;
118 118 mac->ops.set_vmdq = NULL;
119 119 mac->ops.clear_vmdq = NULL;
120 120 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 121 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 122 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 123 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 124 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
125 125 mac->ops.clear_vfta = NULL;
126 126 mac->ops.set_vfta = NULL;
127 127 mac->ops.set_vlvf = NULL;
128 128 mac->ops.init_uta_tables = NULL;
129 129
130 130 /* Flow Control */
131 131 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
132 132
133 133 /* Link */
134 134 mac->ops.get_link_capabilities = NULL;
135 135 mac->ops.setup_link = NULL;
136 136 mac->ops.check_link = NULL;
137 137
138 138 return IXGBE_SUCCESS;
139 139 }
|
↓ open down ↓ |
126 lines elided |
↑ open up ↑ |
140 140
141 141 /**
142 142 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143 143 * control
144 144 * @hw: pointer to hardware structure
145 145 *
146 146 * There are several phys that do not support autoneg flow control. This
147 147 * function check the device id to see if the associated phy supports
148 148 * autoneg flow control.
149 149 **/
150 -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
150 +s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 151 {
152 152
153 153 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154 154
155 155 switch (hw->device_id) {
156 + case IXGBE_DEV_ID_82599_T3_LOM:
156 157 case IXGBE_DEV_ID_X540T:
157 - case IXGBE_DEV_ID_X540T1:
158 158 return IXGBE_SUCCESS;
159 - case IXGBE_DEV_ID_82599_T3_LOM:
160 - return IXGBE_SUCCESS;
161 159 default:
162 160 return IXGBE_ERR_FC_NOT_SUPPORTED;
163 161 }
164 162 }
165 163
166 164 /**
167 165 * ixgbe_setup_fc - Set up flow control
168 166 * @hw: pointer to hardware structure
169 167 *
170 168 * Called at init time to set up flow control.
171 169 **/
172 170 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
173 171 {
174 172 s32 ret_val = IXGBE_SUCCESS;
175 173 u32 reg = 0, reg_bp = 0;
176 174 u16 reg_cu = 0;
175 + bool got_lock = FALSE;
177 176
178 177 DEBUGFUNC("ixgbe_setup_fc");
179 178
180 179 /*
181 180 * Validate the requested mode. Strict IEEE mode does not allow
182 181 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 182 */
184 183 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 184 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 185 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 186 goto out;
188 187 }
189 188
190 189 /*
191 190 * 10gig parts do not have a word in the EEPROM to determine the
192 191 * default flow control setting, so we explicitly set it to full.
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
193 192 */
194 193 if (hw->fc.requested_mode == ixgbe_fc_default)
195 194 hw->fc.requested_mode = ixgbe_fc_full;
196 195
197 196 /*
198 197 * Set up the 1G and 10G flow control advertisement registers so the
199 198 * HW will be able to do fc autoneg once the cable is plugged in. If
200 199 * we link at 10G, the 1G advertisement is harmless and vice versa.
201 200 */
202 201 switch (hw->phy.media_type) {
202 + case ixgbe_media_type_fiber_fixed:
203 203 case ixgbe_media_type_fiber:
204 204 case ixgbe_media_type_backplane:
205 205 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 206 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
207 207 break;
208 208 case ixgbe_media_type_copper:
209 209 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
210 210 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
211 211 break;
212 212 default:
213 213 break;
214 214 }
215 215
216 216 /*
217 217 * The possible values of fc.requested_mode are:
218 218 * 0: Flow control is completely disabled
219 219 * 1: Rx flow control is enabled (we can receive pause frames,
220 220 * but not send pause frames).
221 221 * 2: Tx flow control is enabled (we can send pause frames but
222 222 * we do not support receiving pause frames).
223 223 * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 224 * other: Invalid.
225 225 */
226 226 switch (hw->fc.requested_mode) {
227 227 case ixgbe_fc_none:
228 228 /* Flow control completely disabled by software override. */
229 229 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 230 if (hw->phy.media_type == ixgbe_media_type_backplane)
231 231 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 232 IXGBE_AUTOC_ASM_PAUSE);
233 233 else if (hw->phy.media_type == ixgbe_media_type_copper)
234 234 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 235 break;
236 236 case ixgbe_fc_tx_pause:
237 237 /*
238 238 * Tx Flow control is enabled, and Rx Flow control is
239 239 * disabled by software override.
240 240 */
241 241 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 242 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 243 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 244 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 245 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 246 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 247 reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 248 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 249 }
250 250 break;
251 251 case ixgbe_fc_rx_pause:
252 252 /*
253 253 * Rx Flow control is enabled and Tx Flow control is
254 254 * disabled by software override. Since there really
255 255 * isn't a way to advertise that we are capable of RX
256 256 * Pause ONLY, we will advertise that we support both
257 257 * symmetric and asymmetric Rx PAUSE, as such we fall
258 258 * through to the fc_full statement. Later, we will
259 259 * disable the adapter's ability to send PAUSE frames.
260 260 */
261 261 case ixgbe_fc_full:
262 262 /* Flow control (both Rx and Tx) is enabled by SW override. */
263 263 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 264 if (hw->phy.media_type == ixgbe_media_type_backplane)
265 265 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 266 IXGBE_AUTOC_ASM_PAUSE;
267 267 else if (hw->phy.media_type == ixgbe_media_type_copper)
268 268 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 269 break;
270 270 default:
271 271 DEBUGOUT("Flow control param set incorrectly\n");
272 272 ret_val = IXGBE_ERR_CONFIG;
273 273 goto out;
274 274 }
275 275
276 276 if (hw->mac.type != ixgbe_mac_X540) {
277 277 /*
278 278 * Enable auto-negotiation between the MAC & PHY;
279 279 * the MAC will advertise clause 37 flow control.
280 280 */
281 281 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 282 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283 283
284 284 /* Disable AN timeout */
285 285 if (hw->fc.strict_ieee)
286 286 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287 287
288 288 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
|
↓ open down ↓ |
76 lines elided |
↑ open up ↑ |
289 289 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 290 }
291 291
292 292 /*
293 293 * AUTOC restart handles negotiation of 1G and 10G on backplane
294 294 * and copper. There is no need to set the PCS1GCTL register.
295 295 *
296 296 */
297 297 if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 298 reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 + /* Need the SW/FW semaphore around AUTOC writes if 82599 and
300 + * LESM is on, likewise reset_pipeline requries the lock as
301 + * it also writes AUTOC.
302 + */
303 + if ((hw->mac.type == ixgbe_mac_82599EB) &&
304 + ixgbe_verify_lesm_fw_enabled_82599(hw)) {
305 + ret_val = hw->mac.ops.acquire_swfw_sync(hw,
306 + IXGBE_GSSR_MAC_CSR_SM);
307 + if (ret_val != IXGBE_SUCCESS) {
308 + ret_val = IXGBE_ERR_SWFW_SYNC;
309 + goto out;
310 + }
311 + got_lock = TRUE;
312 + }
313 +
299 314 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
315 + if (hw->mac.type == ixgbe_mac_82599EB)
316 + (void) ixgbe_reset_pipeline_82599(hw);
317 +
318 + if (got_lock)
319 + hw->mac.ops.release_swfw_sync(hw,
320 + IXGBE_GSSR_MAC_CSR_SM);
300 321 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
301 322 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
302 323 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
303 324 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
304 325 }
305 326
306 327 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
307 328 out:
308 329 return ret_val;
309 330 }
310 331
311 332 /**
312 333 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
313 334 * @hw: pointer to hardware structure
314 335 *
315 336 * Starts the hardware by filling the bus info structure and media type, clears
316 337 * all on chip counters, initializes receive address registers, multicast
317 338 * table, VLAN filter table, calls routine to set up link and flow control
318 339 * settings, and leaves transmit and receive units disabled and uninitialized
319 340 **/
320 341 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
321 342 {
322 343 s32 ret_val;
323 344 u32 ctrl_ext;
324 345
325 346 DEBUGFUNC("ixgbe_start_hw_generic");
326 347
327 348 /* Set the media type */
328 349 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
329 350
330 351 /* PHY ops initialization must be done in reset_hw() */
331 352
332 353 /* Clear the VLAN filter table */
333 354 hw->mac.ops.clear_vfta(hw);
334 355
335 356 /* Clear statistics registers */
336 357 hw->mac.ops.clear_hw_cntrs(hw);
337 358
338 359 /* Set No Snoop Disable */
339 360 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
340 361 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
341 362 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
342 363 IXGBE_WRITE_FLUSH(hw);
343 364
344 365 /* Setup flow control */
345 366 ret_val = ixgbe_setup_fc(hw);
346 367 if (ret_val != IXGBE_SUCCESS)
347 368 goto out;
348 369
349 370 /* Clear adapter stopped flag */
350 371 hw->adapter_stopped = FALSE;
351 372
352 373 out:
353 374 return ret_val;
354 375 }
355 376
356 377 /**
357 378 * ixgbe_start_hw_gen2 - Init sequence for common device family
358 379 * @hw: pointer to hw structure
359 380 *
360 381 * Performs the init sequence common to the second generation
361 382 * of 10 GbE devices.
362 383 * Devices in the second generation:
363 384 * 82599
364 385 * X540
365 386 **/
366 387 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
367 388 {
368 389 u32 i;
369 390 u32 regval;
370 391
371 392 /* Clear the rate limiters */
372 393 for (i = 0; i < hw->mac.max_tx_queues; i++) {
373 394 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
374 395 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
375 396 }
376 397 IXGBE_WRITE_FLUSH(hw);
377 398
378 399 /* Disable relaxed ordering */
379 400 for (i = 0; i < hw->mac.max_tx_queues; i++) {
380 401 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
381 402 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
382 403 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
383 404 }
384 405
385 406 for (i = 0; i < hw->mac.max_rx_queues; i++) {
386 407 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
387 408 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
388 409 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
389 410 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
390 411 }
391 412
392 413 return IXGBE_SUCCESS;
393 414 }
394 415
395 416 /**
396 417 * ixgbe_init_hw_generic - Generic hardware initialization
397 418 * @hw: pointer to hardware structure
398 419 *
399 420 * Initialize the hardware by resetting the hardware, filling the bus info
400 421 * structure and media type, clears all on chip counters, initializes receive
401 422 * address registers, multicast table, VLAN filter table, calls routine to set
402 423 * up link and flow control settings, and leaves transmit and receive units
403 424 * disabled and uninitialized
404 425 **/
405 426 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
406 427 {
407 428 s32 status;
408 429
409 430 DEBUGFUNC("ixgbe_init_hw_generic");
410 431
411 432 /* Reset the hardware */
412 433 status = hw->mac.ops.reset_hw(hw);
413 434
414 435 if (status == IXGBE_SUCCESS) {
415 436 /* Start the HW */
416 437 status = hw->mac.ops.start_hw(hw);
417 438 }
418 439
419 440 return status;
420 441 }
421 442
422 443 /**
423 444 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
424 445 * @hw: pointer to hardware structure
425 446 *
426 447 * Clears all hardware statistics counters by reading them from the hardware
427 448 * Statistics counters are clear on read.
428 449 **/
429 450 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
430 451 {
431 452 u16 i = 0;
432 453
433 454 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
434 455
435 456 (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 457 (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 458 (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 459 (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
439 460 for (i = 0; i < 8; i++)
440 461 (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
441 462
442 463 (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
443 464 (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
444 465 (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
445 466 (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 467 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
447 468 if (hw->mac.type >= ixgbe_mac_82599EB) {
448 469 (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 470 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
450 471 } else {
451 472 (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 473 (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
453 474 }
454 475
455 476 for (i = 0; i < 8; i++) {
456 477 (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 478 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
458 479 if (hw->mac.type >= ixgbe_mac_82599EB) {
459 480 (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 481 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
461 482 } else {
462 483 (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 484 (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
464 485 }
465 486 }
466 487 if (hw->mac.type >= ixgbe_mac_82599EB)
467 488 for (i = 0; i < 8; i++)
468 489 (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 490 (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
470 491 (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
471 492 (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
472 493 (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
473 494 (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 495 (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 496 (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
476 497 (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
477 498 (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
478 499 (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
479 500 (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
480 501 (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
481 502 (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 503 (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 504 if (hw->mac.type == ixgbe_mac_82598EB)
484 505 for (i = 0; i < 8; i++)
485 506 (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 507 (void) IXGBE_READ_REG(hw, IXGBE_RUC);
487 508 (void) IXGBE_READ_REG(hw, IXGBE_RFC);
488 509 (void) IXGBE_READ_REG(hw, IXGBE_ROC);
489 510 (void) IXGBE_READ_REG(hw, IXGBE_RJC);
490 511 (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 512 (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 513 (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 514 (void) IXGBE_READ_REG(hw, IXGBE_TORL);
494 515 (void) IXGBE_READ_REG(hw, IXGBE_TORH);
495 516 (void) IXGBE_READ_REG(hw, IXGBE_TPR);
496 517 (void) IXGBE_READ_REG(hw, IXGBE_TPT);
497 518 (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
498 519 (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
499 520 (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
500 521 (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
501 522 (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 523 (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 524 (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
504 525 (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
505 526 for (i = 0; i < 16; i++) {
506 527 (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 528 (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
508 529 if (hw->mac.type >= ixgbe_mac_82599EB) {
509 530 (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 531 (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 532 (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 533 (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 534 (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
514 535 } else {
515 536 (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 537 (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
517 538 }
518 539 }
519 540
520 541 if (hw->mac.type == ixgbe_mac_X540) {
521 542 if (hw->phy.id == 0)
522 543 (void) ixgbe_identify_phy(hw);
523 544 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 545 IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 546 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 547 IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 548 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 549 IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 550 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 551 IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 552 }
532 553
533 554 return IXGBE_SUCCESS;
534 555 }
535 556
536 557 /**
537 558 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
538 559 * @hw: pointer to hardware structure
539 560 * @pba_num: stores the part number string from the EEPROM
540 561 * @pba_num_size: part number string buffer length
541 562 *
542 563 * Reads the part number string from the EEPROM.
543 564 **/
544 565 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
545 566 u32 pba_num_size)
546 567 {
547 568 s32 ret_val;
548 569 u16 data;
549 570 u16 pba_ptr;
550 571 u16 offset;
551 572 u16 length;
552 573
553 574 DEBUGFUNC("ixgbe_read_pba_string_generic");
554 575
555 576 if (pba_num == NULL) {
556 577 DEBUGOUT("PBA string buffer was null\n");
557 578 return IXGBE_ERR_INVALID_ARGUMENT;
558 579 }
559 580
560 581 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
561 582 if (ret_val) {
562 583 DEBUGOUT("NVM Read Error\n");
563 584 return ret_val;
564 585 }
565 586
566 587 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
567 588 if (ret_val) {
568 589 DEBUGOUT("NVM Read Error\n");
569 590 return ret_val;
570 591 }
571 592
572 593 /*
573 594 * if data is not ptr guard the PBA must be in legacy format which
574 595 * means pba_ptr is actually our second data word for the PBA number
575 596 * and we can decode it into an ascii string
576 597 */
577 598 if (data != IXGBE_PBANUM_PTR_GUARD) {
578 599 DEBUGOUT("NVM PBA number is not stored as string\n");
579 600
580 601 /* we will need 11 characters to store the PBA */
581 602 if (pba_num_size < 11) {
582 603 DEBUGOUT("PBA string buffer too small\n");
583 604 return IXGBE_ERR_NO_SPACE;
584 605 }
585 606
586 607 /* extract hex string from data and pba_ptr */
587 608 pba_num[0] = (data >> 12) & 0xF;
588 609 pba_num[1] = (data >> 8) & 0xF;
589 610 pba_num[2] = (data >> 4) & 0xF;
590 611 pba_num[3] = data & 0xF;
591 612 pba_num[4] = (pba_ptr >> 12) & 0xF;
592 613 pba_num[5] = (pba_ptr >> 8) & 0xF;
593 614 pba_num[6] = '-';
594 615 pba_num[7] = 0;
595 616 pba_num[8] = (pba_ptr >> 4) & 0xF;
596 617 pba_num[9] = pba_ptr & 0xF;
597 618
598 619 /* put a null character on the end of our string */
599 620 pba_num[10] = '\0';
600 621
601 622 /* switch all the data but the '-' to hex char */
602 623 for (offset = 0; offset < 10; offset++) {
603 624 if (pba_num[offset] < 0xA)
604 625 pba_num[offset] += '0';
605 626 else if (pba_num[offset] < 0x10)
606 627 pba_num[offset] += 'A' - 0xA;
607 628 }
608 629
609 630 return IXGBE_SUCCESS;
610 631 }
611 632
612 633 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
613 634 if (ret_val) {
614 635 DEBUGOUT("NVM Read Error\n");
615 636 return ret_val;
616 637 }
617 638
618 639 if (length == 0xFFFF || length == 0) {
619 640 DEBUGOUT("NVM PBA number section invalid length\n");
620 641 return IXGBE_ERR_PBA_SECTION;
621 642 }
622 643
623 644 /* check if pba_num buffer is big enough */
624 645 if (pba_num_size < (((u32)length * 2) - 1)) {
625 646 DEBUGOUT("PBA string buffer too small\n");
626 647 return IXGBE_ERR_NO_SPACE;
627 648 }
628 649
629 650 /* trim pba length from start of string */
630 651 pba_ptr++;
631 652 length--;
632 653
633 654 for (offset = 0; offset < length; offset++) {
634 655 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
635 656 if (ret_val) {
636 657 DEBUGOUT("NVM Read Error\n");
637 658 return ret_val;
638 659 }
639 660 pba_num[offset * 2] = (u8)(data >> 8);
640 661 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
641 662 }
642 663 pba_num[offset * 2] = '\0';
643 664
644 665 return IXGBE_SUCCESS;
645 666 }
646 667
647 668 /**
648 669 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
649 670 * @hw: pointer to hardware structure
650 671 * @pba_num: stores the part number from the EEPROM
651 672 *
652 673 * Reads the part number from the EEPROM.
653 674 **/
654 675 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
655 676 {
656 677 s32 ret_val;
657 678 u16 data;
658 679
659 680 DEBUGFUNC("ixgbe_read_pba_num_generic");
660 681
661 682 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
662 683 if (ret_val) {
663 684 DEBUGOUT("NVM Read Error\n");
664 685 return ret_val;
665 686 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
666 687 DEBUGOUT("NVM Not supported\n");
667 688 return IXGBE_NOT_IMPLEMENTED;
668 689 }
669 690 *pba_num = (u32)(data << 16);
670 691
671 692 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
|
↓ open down ↓ |
362 lines elided |
↑ open up ↑ |
672 693 if (ret_val) {
673 694 DEBUGOUT("NVM Read Error\n");
674 695 return ret_val;
675 696 }
676 697 *pba_num |= data;
677 698
678 699 return IXGBE_SUCCESS;
679 700 }
680 701
681 702 /**
703 + * ixgbe_read_pba_raw
704 + * @hw: pointer to the HW structure
705 + * @eeprom_buf: optional pointer to EEPROM image
706 + * @eeprom_buf_size: size of EEPROM image in words
707 + * @max_pba_block_size: PBA block size limit
708 + * @pba: pointer to output PBA structure
709 + *
710 + * Reads PBA from EEPROM image when eeprom_buf is not NULL.
711 + * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
712 + *
713 + **/
714 +s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
715 + u32 eeprom_buf_size, u16 max_pba_block_size,
716 + struct ixgbe_pba *pba)
717 +{
718 + s32 ret_val;
719 + u16 pba_block_size;
720 +
721 + if (pba == NULL)
722 + return IXGBE_ERR_PARAM;
723 +
724 + if (eeprom_buf == NULL) {
725 + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
726 + &pba->word[0]);
727 + if (ret_val)
728 + return ret_val;
729 + } else {
730 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
731 + pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
732 + pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
733 + } else {
734 + return IXGBE_ERR_PARAM;
735 + }
736 + }
737 +
738 + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
739 + if (pba->pba_block == NULL)
740 + return IXGBE_ERR_PARAM;
741 +
742 + ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
743 + eeprom_buf_size,
744 + &pba_block_size);
745 + if (ret_val)
746 + return ret_val;
747 +
748 + if (pba_block_size > max_pba_block_size)
749 + return IXGBE_ERR_PARAM;
750 +
751 + if (eeprom_buf == NULL) {
752 + ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
753 + pba_block_size,
754 + pba->pba_block);
755 + if (ret_val)
756 + return ret_val;
757 + } else {
758 + if (eeprom_buf_size > (u32)(pba->word[1] +
759 + pba->pba_block[0])) {
760 + (void) memcpy(pba->pba_block,
761 + &eeprom_buf[pba->word[1]],
762 + pba_block_size * sizeof(u16));
763 + } else {
764 + return IXGBE_ERR_PARAM;
765 + }
766 + }
767 + }
768 +
769 + return IXGBE_SUCCESS;
770 +}
771 +
772 +/**
773 + * ixgbe_write_pba_raw
774 + * @hw: pointer to the HW structure
775 + * @eeprom_buf: optional pointer to EEPROM image
776 + * @eeprom_buf_size: size of EEPROM image in words
777 + * @pba: pointer to PBA structure
778 + *
779 + * Writes PBA to EEPROM image when eeprom_buf is not NULL.
780 + * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
781 + *
782 + **/
783 +s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
784 + u32 eeprom_buf_size, struct ixgbe_pba *pba)
785 +{
786 + s32 ret_val;
787 +
788 + if (pba == NULL)
789 + return IXGBE_ERR_PARAM;
790 +
791 + if (eeprom_buf == NULL) {
792 + ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
793 + &pba->word[0]);
794 + if (ret_val)
795 + return ret_val;
796 + } else {
797 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
798 + eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
799 + eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
800 + } else {
801 + return IXGBE_ERR_PARAM;
802 + }
803 + }
804 +
805 + if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
806 + if (pba->pba_block == NULL)
807 + return IXGBE_ERR_PARAM;
808 +
809 + if (eeprom_buf == NULL) {
810 + ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
811 + pba->pba_block[0],
812 + pba->pba_block);
813 + if (ret_val)
814 + return ret_val;
815 + } else {
816 + if (eeprom_buf_size > (u32)(pba->word[1] +
817 + pba->pba_block[0])) {
818 + (void) memcpy(&eeprom_buf[pba->word[1]],
819 + pba->pba_block,
820 + pba->pba_block[0] * sizeof(u16));
821 + } else {
822 + return IXGBE_ERR_PARAM;
823 + }
824 + }
825 + }
826 +
827 + return IXGBE_SUCCESS;
828 +}
829 +
830 +/**
831 + * ixgbe_get_pba_block_size
832 + * @hw: pointer to the HW structure
833 + * @eeprom_buf: optional pointer to EEPROM image
834 + * @eeprom_buf_size: size of EEPROM image in words
835 + * @pba_data_size: pointer to output variable
836 + *
837 + * Returns the size of the PBA block in words. Function operates on EEPROM
838 + * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
839 + * EEPROM device.
840 + *
841 + **/
842 +s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
843 + u32 eeprom_buf_size, u16 *pba_block_size)
844 +{
845 + s32 ret_val;
846 + u16 pba_word[2];
847 + u16 length;
848 +
849 + DEBUGFUNC("ixgbe_get_pba_block_size");
850 +
851 + if (eeprom_buf == NULL) {
852 + ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
853 + &pba_word[0]);
854 + if (ret_val)
855 + return ret_val;
856 + } else {
857 + if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
858 + pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
859 + pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
860 + } else {
861 + return IXGBE_ERR_PARAM;
862 + }
863 + }
864 +
865 + if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
866 + if (eeprom_buf == NULL) {
867 + ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
868 + &length);
869 + if (ret_val)
870 + return ret_val;
871 + } else {
872 + if (eeprom_buf_size > pba_word[1])
873 + length = eeprom_buf[pba_word[1] + 0];
874 + else
875 + return IXGBE_ERR_PARAM;
876 + }
877 +
878 + if (length == 0xFFFF || length == 0)
879 + return IXGBE_ERR_PBA_SECTION;
880 + } else {
881 + /* PBA number in legacy format, there is no PBA Block. */
882 + length = 0;
883 + }
884 +
885 + if (pba_block_size != NULL)
886 + *pba_block_size = length;
887 +
888 + return IXGBE_SUCCESS;
889 +}
890 +
891 +/**
682 892 * ixgbe_get_mac_addr_generic - Generic get MAC address
683 893 * @hw: pointer to hardware structure
684 894 * @mac_addr: Adapter MAC address
685 895 *
686 896 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
687 897 * A reset of the adapter must be performed prior to calling this function
688 898 * in order for the MAC address to have been loaded from the EEPROM into RAR0
689 899 **/
690 900 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
691 901 {
692 902 u32 rar_high;
693 903 u32 rar_low;
694 904 u16 i;
695 905
696 906 DEBUGFUNC("ixgbe_get_mac_addr_generic");
697 907
698 908 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
699 909 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
700 910
701 911 for (i = 0; i < 4; i++)
702 912 mac_addr[i] = (u8)(rar_low >> (i*8));
703 913
704 914 for (i = 0; i < 2; i++)
705 915 mac_addr[i+4] = (u8)(rar_high >> (i*8));
706 916
707 917 return IXGBE_SUCCESS;
708 918 }
709 919
710 920 /**
711 921 * ixgbe_get_bus_info_generic - Generic set PCI bus info
712 922 * @hw: pointer to hardware structure
713 923 *
714 924 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
715 925 **/
716 926 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
717 927 {
718 928 struct ixgbe_mac_info *mac = &hw->mac;
719 929 u16 link_status;
720 930
721 931 DEBUGFUNC("ixgbe_get_bus_info_generic");
722 932
723 933 hw->bus.type = ixgbe_bus_type_pci_express;
724 934
725 935 /* Get the negotiated link width and speed from PCI config space */
726 936 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
727 937
728 938 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
729 939 case IXGBE_PCI_LINK_WIDTH_1:
730 940 hw->bus.width = ixgbe_bus_width_pcie_x1;
731 941 break;
732 942 case IXGBE_PCI_LINK_WIDTH_2:
733 943 hw->bus.width = ixgbe_bus_width_pcie_x2;
734 944 break;
735 945 case IXGBE_PCI_LINK_WIDTH_4:
736 946 hw->bus.width = ixgbe_bus_width_pcie_x4;
737 947 break;
738 948 case IXGBE_PCI_LINK_WIDTH_8:
739 949 hw->bus.width = ixgbe_bus_width_pcie_x8;
740 950 break;
741 951 default:
742 952 hw->bus.width = ixgbe_bus_width_unknown;
743 953 break;
744 954 }
745 955
746 956 switch (link_status & IXGBE_PCI_LINK_SPEED) {
747 957 case IXGBE_PCI_LINK_SPEED_2500:
748 958 hw->bus.speed = ixgbe_bus_speed_2500;
749 959 break;
750 960 case IXGBE_PCI_LINK_SPEED_5000:
751 961 hw->bus.speed = ixgbe_bus_speed_5000;
752 962 break;
753 963 case IXGBE_PCI_LINK_SPEED_8000:
754 964 hw->bus.speed = ixgbe_bus_speed_8000;
755 965 break;
756 966 default:
757 967 hw->bus.speed = ixgbe_bus_speed_unknown;
758 968 break;
759 969 }
760 970
761 971 mac->ops.set_lan_id(hw);
762 972
763 973 return IXGBE_SUCCESS;
764 974 }
765 975
766 976 /**
767 977 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
768 978 * @hw: pointer to the HW structure
769 979 *
770 980 * Determines the LAN function id by reading memory-mapped registers
771 981 * and swaps the port value if requested.
772 982 **/
773 983 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
774 984 {
775 985 struct ixgbe_bus_info *bus = &hw->bus;
776 986 u32 reg;
777 987
778 988 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
779 989
780 990 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
781 991 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
782 992 bus->lan_id = bus->func;
783 993
784 994 /* check for a port swap */
785 995 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
786 996 if (reg & IXGBE_FACTPS_LFS)
787 997 bus->func ^= 0x1;
788 998 }
789 999
790 1000 /**
791 1001 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
792 1002 * @hw: pointer to hardware structure
793 1003 *
794 1004 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
795 1005 * disables transmit and receive units. The adapter_stopped flag is used by
796 1006 * the shared code and drivers to determine if the adapter is in a stopped
797 1007 * state and should not touch the hardware.
798 1008 **/
799 1009 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
800 1010 {
801 1011 u32 reg_val;
802 1012 u16 i;
803 1013
804 1014 DEBUGFUNC("ixgbe_stop_adapter_generic");
805 1015
806 1016 /*
807 1017 * Set the adapter_stopped flag so other driver functions stop touching
808 1018 * the hardware
809 1019 */
810 1020 hw->adapter_stopped = TRUE;
811 1021
812 1022 /* Disable the receive unit */
813 1023 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
814 1024
815 1025 /* Clear interrupt mask to stop interrupts from being generated */
816 1026 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
817 1027
818 1028 /* Clear any pending interrupts, flush previous writes */
819 1029 (void) IXGBE_READ_REG(hw, IXGBE_EICR);
820 1030
821 1031 /* Disable the transmit unit. Each queue must be disabled. */
822 1032 for (i = 0; i < hw->mac.max_tx_queues; i++)
823 1033 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824 1034
825 1035 /* Disable the receive unit by stopping each queue */
826 1036 for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 1037 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 1038 reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 1039 reg_val |= IXGBE_RXDCTL_SWFLSH;
830 1040 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
831 1041 }
832 1042
833 1043 /* flush all queues disables */
834 1044 IXGBE_WRITE_FLUSH(hw);
835 1045 msec_delay(2);
836 1046
837 1047 /*
838 1048 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
839 1049 * access and verify no pending requests
840 1050 */
841 1051 return ixgbe_disable_pcie_master(hw);
842 1052 }
843 1053
844 1054 /**
845 1055 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
846 1056 * @hw: pointer to hardware structure
847 1057 * @index: led number to turn on
848 1058 **/
849 1059 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
850 1060 {
851 1061 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
852 1062
853 1063 DEBUGFUNC("ixgbe_led_on_generic");
854 1064
855 1065 /* To turn on the LED, set mode to ON. */
856 1066 led_reg &= ~IXGBE_LED_MODE_MASK(index);
857 1067 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
858 1068 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
859 1069 IXGBE_WRITE_FLUSH(hw);
860 1070
861 1071 return IXGBE_SUCCESS;
862 1072 }
863 1073
864 1074 /**
865 1075 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
866 1076 * @hw: pointer to hardware structure
867 1077 * @index: led number to turn off
868 1078 **/
869 1079 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
870 1080 {
871 1081 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
872 1082
873 1083 DEBUGFUNC("ixgbe_led_off_generic");
874 1084
875 1085 /* To turn off the LED, set mode to OFF. */
876 1086 led_reg &= ~IXGBE_LED_MODE_MASK(index);
877 1087 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
878 1088 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
879 1089 IXGBE_WRITE_FLUSH(hw);
880 1090
881 1091 return IXGBE_SUCCESS;
882 1092 }
883 1093
884 1094 /**
885 1095 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
886 1096 * @hw: pointer to hardware structure
887 1097 *
888 1098 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
889 1099 * ixgbe_hw struct in order to set up EEPROM access.
890 1100 **/
891 1101 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
892 1102 {
893 1103 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
894 1104 u32 eec;
895 1105 u16 eeprom_size;
896 1106
897 1107 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
898 1108
899 1109 if (eeprom->type == ixgbe_eeprom_uninitialized) {
900 1110 eeprom->type = ixgbe_eeprom_none;
901 1111 /* Set default semaphore delay to 10ms which is a well
902 1112 * tested value */
903 1113 eeprom->semaphore_delay = 10;
904 1114 /* Clear EEPROM page size, it will be initialized as needed */
905 1115 eeprom->word_page_size = 0;
906 1116
907 1117 /*
908 1118 * Check for EEPROM present first.
909 1119 * If not present leave as none
910 1120 */
911 1121 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
912 1122 if (eec & IXGBE_EEC_PRES) {
913 1123 eeprom->type = ixgbe_eeprom_spi;
914 1124
915 1125 /*
916 1126 * SPI EEPROM is assumed here. This code would need to
917 1127 * change if a future EEPROM is not SPI.
918 1128 */
919 1129 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
920 1130 IXGBE_EEC_SIZE_SHIFT);
921 1131 eeprom->word_size = 1 << (eeprom_size +
922 1132 IXGBE_EEPROM_WORD_SIZE_SHIFT);
923 1133 }
924 1134
925 1135 if (eec & IXGBE_EEC_ADDR_SIZE)
926 1136 eeprom->address_bits = 16;
927 1137 else
928 1138 eeprom->address_bits = 8;
929 1139 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
930 1140 "%d\n", eeprom->type, eeprom->word_size,
931 1141 eeprom->address_bits);
932 1142 }
933 1143
934 1144 return IXGBE_SUCCESS;
935 1145 }
936 1146
937 1147 /**
938 1148 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
939 1149 * @hw: pointer to hardware structure
940 1150 * @offset: offset within the EEPROM to write
941 1151 * @words: number of word(s)
942 1152 * @data: 16 bit word(s) to write to EEPROM
943 1153 *
944 1154 * Reads 16 bit word(s) from EEPROM through bit-bang method
945 1155 **/
946 1156 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
947 1157 u16 words, u16 *data)
948 1158 {
949 1159 s32 status = IXGBE_SUCCESS;
950 1160 u16 i, count;
951 1161
952 1162 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
953 1163
954 1164 hw->eeprom.ops.init_params(hw);
955 1165
956 1166 if (words == 0) {
957 1167 status = IXGBE_ERR_INVALID_ARGUMENT;
958 1168 goto out;
959 1169 }
960 1170
961 1171 if (offset + words > hw->eeprom.word_size) {
962 1172 status = IXGBE_ERR_EEPROM;
963 1173 goto out;
964 1174 }
965 1175
966 1176 /*
967 1177 * The EEPROM page size cannot be queried from the chip. We do lazy
968 1178 * initialization. It is worth to do that when we write large buffer.
969 1179 */
970 1180 if ((hw->eeprom.word_page_size == 0) &&
971 1181 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 1182 status = ixgbe_detect_eeprom_page_size_generic(hw, offset);
973 1183 if (status != IXGBE_SUCCESS)
974 1184 goto out;
975 1185
976 1186 /*
977 1187 * We cannot hold synchronization semaphores for too long
978 1188 * to avoid other entity starvation. However it is more efficient
979 1189 * to read in bursts than synchronizing access for each word.
980 1190 */
981 1191 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
982 1192 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
983 1193 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
984 1194 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
985 1195 count, &data[i]);
986 1196
987 1197 if (status != IXGBE_SUCCESS)
988 1198 break;
989 1199 }
990 1200
991 1201 out:
992 1202 return status;
993 1203 }
994 1204
/**
 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 * @hw: pointer to hardware structure
 * @offset: offset within the EEPROM to be written to
 * @words: number of word(s)
 * @data: 16 bit word(s) to be written to the EEPROM
 *
 * Acquires the EEPROM, then clocks each word out over SPI, batching
 * consecutive words into a single page write where the page size allows.
 *
 * If ixgbe_eeprom_update_checksum is not called after this function, the
 * EEPROM will most likely contain an invalid checksum.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == IXGBE_SUCCESS) {
		if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/* Send the WRITE ENABLE command (8 bit opcode ) */
			ixgbe_shift_out_eeprom_bits(hw,
						   IXGBE_EEPROM_WREN_OPCODE_SPI,
						   IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr) */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			/* address is in bytes, words are 2 bytes wide */
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI*/
			do {
				word = data[i];
				/* swap to the byte order clocked out on SPI */
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				/* page size 0 (unknown): one word per write */
				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			/*
			 * NOTE: ++i here advances the OUTER loop index too,
			 * so the for loop resumes after the burst.
			 */
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* allow the device's internal write cycle to finish */
			msec_delay(10);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
1077 1287
1078 1288 /**
1079 1289 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1080 1290 * @hw: pointer to hardware structure
1081 1291 * @offset: offset within the EEPROM to be written to
1082 1292 * @data: 16 bit word to be written to the EEPROM
1083 1293 *
1084 1294 * If ixgbe_eeprom_update_checksum is not called after this function, the
1085 1295 * EEPROM will most likely contain an invalid checksum.
1086 1296 **/
1087 1297 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1088 1298 {
1089 1299 s32 status;
1090 1300
1091 1301 DEBUGFUNC("ixgbe_write_eeprom_generic");
1092 1302
1093 1303 hw->eeprom.ops.init_params(hw);
1094 1304
1095 1305 if (offset >= hw->eeprom.word_size) {
1096 1306 status = IXGBE_ERR_EEPROM;
1097 1307 goto out;
1098 1308 }
1099 1309
1100 1310 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1101 1311
1102 1312 out:
1103 1313 return status;
1104 1314 }
1105 1315
1106 1316 /**
1107 1317 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1108 1318 * @hw: pointer to hardware structure
1109 1319 * @offset: offset within the EEPROM to be read
1110 1320 * @data: read 16 bit words(s) from EEPROM
1111 1321 * @words: number of word(s)
1112 1322 *
1113 1323 * Reads 16 bit word(s) from EEPROM through bit-bang method
1114 1324 **/
1115 1325 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1116 1326 u16 words, u16 *data)
1117 1327 {
1118 1328 s32 status = IXGBE_SUCCESS;
1119 1329 u16 i, count;
1120 1330
1121 1331 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1122 1332
1123 1333 hw->eeprom.ops.init_params(hw);
1124 1334
1125 1335 if (words == 0) {
1126 1336 status = IXGBE_ERR_INVALID_ARGUMENT;
1127 1337 goto out;
1128 1338 }
1129 1339
1130 1340 if (offset + words > hw->eeprom.word_size) {
1131 1341 status = IXGBE_ERR_EEPROM;
1132 1342 goto out;
1133 1343 }
1134 1344
1135 1345 /*
1136 1346 * We cannot hold synchronization semaphores for too long
1137 1347 * to avoid other entity starvation. However it is more efficient
1138 1348 * to read in bursts than synchronizing access for each word.
1139 1349 */
1140 1350 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1141 1351 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1142 1352 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1143 1353
1144 1354 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1145 1355 count, &data[i]);
1146 1356
1147 1357 if (status != IXGBE_SUCCESS)
1148 1358 break;
1149 1359 }
1150 1360
1151 1361 out:
1152 1362 return status;
1153 1363 }
1154 1364
1155 1365 /**
1156 1366 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1157 1367 * @hw: pointer to hardware structure
1158 1368 * @offset: offset within the EEPROM to be read
1159 1369 * @words: number of word(s)
1160 1370 * @data: read 16 bit word(s) from EEPROM
1161 1371 *
1162 1372 * Reads 16 bit word(s) from EEPROM through bit-bang method
1163 1373 **/
1164 1374 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1165 1375 u16 words, u16 *data)
1166 1376 {
1167 1377 s32 status;
1168 1378 u16 word_in;
1169 1379 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1170 1380 u16 i;
1171 1381
1172 1382 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1173 1383
1174 1384 /* Prepare the EEPROM for reading */
1175 1385 status = ixgbe_acquire_eeprom(hw);
1176 1386
1177 1387 if (status == IXGBE_SUCCESS) {
1178 1388 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1179 1389 ixgbe_release_eeprom(hw);
1180 1390 status = IXGBE_ERR_EEPROM;
1181 1391 }
1182 1392 }
1183 1393
1184 1394 if (status == IXGBE_SUCCESS) {
1185 1395 for (i = 0; i < words; i++) {
1186 1396 ixgbe_standby_eeprom(hw);
1187 1397 /*
1188 1398 * Some SPI eeproms use the 8th address bit embedded
1189 1399 * in the opcode
1190 1400 */
1191 1401 if ((hw->eeprom.address_bits == 8) &&
1192 1402 ((offset + i) >= 128))
1193 1403 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1194 1404
1195 1405 /* Send the READ command (opcode + addr) */
1196 1406 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1197 1407 IXGBE_EEPROM_OPCODE_BITS);
1198 1408 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1199 1409 hw->eeprom.address_bits);
1200 1410
1201 1411 /* Read the data. */
1202 1412 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1203 1413 data[i] = (word_in >> 8) | (word_in << 8);
1204 1414 }
1205 1415
1206 1416 /* End this read operation */
1207 1417 ixgbe_release_eeprom(hw);
1208 1418 }
1209 1419
1210 1420 return status;
1211 1421 }
1212 1422
1213 1423 /**
1214 1424 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1215 1425 * @hw: pointer to hardware structure
1216 1426 * @offset: offset within the EEPROM to be read
1217 1427 * @data: read 16 bit value from EEPROM
1218 1428 *
1219 1429 * Reads 16 bit value from EEPROM through bit-bang method
1220 1430 **/
1221 1431 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1222 1432 u16 *data)
1223 1433 {
1224 1434 s32 status;
1225 1435
1226 1436 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1227 1437
1228 1438 hw->eeprom.ops.init_params(hw);
1229 1439
1230 1440 if (offset >= hw->eeprom.word_size) {
1231 1441 status = IXGBE_ERR_EEPROM;
1232 1442 goto out;
1233 1443 }
1234 1444
1235 1445 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1236 1446
1237 1447 out:
1238 1448 return status;
1239 1449 }
1240 1450
1241 1451 /**
1242 1452 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1243 1453 * @hw: pointer to hardware structure
1244 1454 * @offset: offset of word in the EEPROM to read
1245 1455 * @words: number of word(s)
1246 1456 * @data: 16 bit word(s) from the EEPROM
1247 1457 *
1248 1458 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1249 1459 **/
1250 1460 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1251 1461 u16 words, u16 *data)
1252 1462 {
1253 1463 u32 eerd;
1254 1464 s32 status = IXGBE_SUCCESS;
1255 1465 u32 i;
1256 1466
1257 1467 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1258 1468
1259 1469 hw->eeprom.ops.init_params(hw);
1260 1470
1261 1471 if (words == 0) {
|
↓ open down ↓ |
570 lines elided |
↑ open up ↑ |
1262 1472 status = IXGBE_ERR_INVALID_ARGUMENT;
1263 1473 goto out;
1264 1474 }
1265 1475
1266 1476 if (offset >= hw->eeprom.word_size) {
1267 1477 status = IXGBE_ERR_EEPROM;
1268 1478 goto out;
1269 1479 }
1270 1480
1271 1481 for (i = 0; i < words; i++) {
1272 - eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1482 + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1273 1483 IXGBE_EEPROM_RW_REG_START;
1274 1484
1275 1485 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1276 1486 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1277 1487
1278 1488 if (status == IXGBE_SUCCESS) {
1279 1489 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1280 1490 IXGBE_EEPROM_RW_REG_DATA);
1281 1491 } else {
1282 1492 DEBUGOUT("Eeprom read timed out\n");
1283 1493 goto out;
1284 1494 }
1285 1495 }
1286 1496 out:
1287 1497 return status;
1288 1498 }
1289 1499
1290 1500 /**
1291 1501 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1292 1502 * @hw: pointer to hardware structure
1293 1503 * @offset: offset within the EEPROM to be used as a scratch pad
1294 1504 *
1295 1505 * Discover EEPROM page size by writing marching data at given offset.
1296 1506 * This function is called only when we are writing a new large buffer
1297 1507 * at given offset so the data would be overwritten anyway.
1298 1508 **/
1299 1509 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1300 1510 u16 offset)
1301 1511 {
1302 1512 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1303 1513 s32 status = IXGBE_SUCCESS;
1304 1514 u16 i;
1305 1515
1306 1516 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1307 1517
1308 1518 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1309 1519 data[i] = i;
1310 1520
1311 1521 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1312 1522 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1313 1523 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1314 1524 hw->eeprom.word_page_size = 0;
1315 1525 if (status != IXGBE_SUCCESS)
1316 1526 goto out;
1317 1527
1318 1528 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1319 1529 if (status != IXGBE_SUCCESS)
1320 1530 goto out;
1321 1531
1322 1532 /*
1323 1533 * When writing in burst more than the actual page size
1324 1534 * EEPROM address wraps around current page.
1325 1535 */
1326 1536 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1327 1537
1328 1538 DEBUGOUT1("Detected EEPROM page size = %d words.",
1329 1539 hw->eeprom.word_page_size);
1330 1540 out:
1331 1541 return status;
1332 1542 }
1333 1543
1334 1544 /**
1335 1545 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1336 1546 * @hw: pointer to hardware structure
1337 1547 * @offset: offset of word in the EEPROM to read
1338 1548 * @data: word read from the EEPROM
1339 1549 *
1340 1550 * Reads a 16 bit word from the EEPROM using the EERD register.
1341 1551 **/
1342 1552 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1343 1553 {
1344 1554 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1345 1555 }
1346 1556
1347 1557 /**
1348 1558 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1349 1559 * @hw: pointer to hardware structure
1350 1560 * @offset: offset of word in the EEPROM to write
1351 1561 * @words: number of word(s)
1352 1562 * @data: word(s) write to the EEPROM
1353 1563 *
1354 1564 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1355 1565 **/
1356 1566 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1357 1567 u16 words, u16 *data)
1358 1568 {
1359 1569 u32 eewr;
1360 1570 s32 status = IXGBE_SUCCESS;
1361 1571 u16 i;
1362 1572
1363 1573 DEBUGFUNC("ixgbe_write_eewr_generic");
1364 1574
1365 1575 hw->eeprom.ops.init_params(hw);
1366 1576
1367 1577 if (words == 0) {
1368 1578 status = IXGBE_ERR_INVALID_ARGUMENT;
1369 1579 goto out;
1370 1580 }
1371 1581
1372 1582 if (offset >= hw->eeprom.word_size) {
1373 1583 status = IXGBE_ERR_EEPROM;
1374 1584 goto out;
1375 1585 }
1376 1586
1377 1587 for (i = 0; i < words; i++) {
1378 1588 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1379 1589 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1380 1590 IXGBE_EEPROM_RW_REG_START;
1381 1591
1382 1592 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1383 1593 if (status != IXGBE_SUCCESS) {
1384 1594 DEBUGOUT("Eeprom write EEWR timed out\n");
1385 1595 goto out;
1386 1596 }
1387 1597
1388 1598 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1389 1599
1390 1600 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1391 1601 if (status != IXGBE_SUCCESS) {
1392 1602 DEBUGOUT("Eeprom write EEWR timed out\n");
1393 1603 goto out;
1394 1604 }
1395 1605 }
1396 1606
1397 1607 out:
1398 1608 return status;
1399 1609 }
1400 1610
1401 1611 /**
1402 1612 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1403 1613 * @hw: pointer to hardware structure
1404 1614 * @offset: offset of word in the EEPROM to write
1405 1615 * @data: word write to the EEPROM
1406 1616 *
1407 1617 * Write a 16 bit word to the EEPROM using the EEWR register.
1408 1618 **/
1409 1619 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1410 1620 {
1411 1621 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1412 1622 }
1413 1623
1414 1624 /**
1415 1625 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1416 1626 * @hw: pointer to hardware structure
1417 1627 * @ee_reg: EEPROM flag for polling
1418 1628 *
1419 1629 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1420 1630 * read or write is done respectively.
1421 1631 **/
1422 1632 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1423 1633 {
1424 1634 u32 i;
1425 1635 u32 reg;
1426 1636 s32 status = IXGBE_ERR_EEPROM;
1427 1637
1428 1638 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1429 1639
1430 1640 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1431 1641 if (ee_reg == IXGBE_NVM_POLL_READ)
1432 1642 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1433 1643 else
1434 1644 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1435 1645
1436 1646 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1437 1647 status = IXGBE_SUCCESS;
1438 1648 break;
1439 1649 }
1440 1650 usec_delay(5);
1441 1651 }
1442 1652 return status;
1443 1653 }
1444 1654
1445 1655 /**
1446 1656 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1447 1657 * @hw: pointer to hardware structure
1448 1658 *
1449 1659 * Prepares EEPROM for access using bit-bang method. This function should
1450 1660 * be called before issuing a command to the EEPROM.
1451 1661 **/
1452 1662 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1453 1663 {
1454 1664 s32 status = IXGBE_SUCCESS;
1455 1665 u32 eec;
1456 1666 u32 i;
1457 1667
1458 1668 DEBUGFUNC("ixgbe_acquire_eeprom");
1459 1669
1460 1670 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1461 1671 != IXGBE_SUCCESS)
1462 1672 status = IXGBE_ERR_SWFW_SYNC;
1463 1673
1464 1674 if (status == IXGBE_SUCCESS) {
1465 1675 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1466 1676
1467 1677 /* Request EEPROM Access */
1468 1678 eec |= IXGBE_EEC_REQ;
1469 1679 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1470 1680
1471 1681 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1472 1682 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1473 1683 if (eec & IXGBE_EEC_GNT)
1474 1684 break;
1475 1685 usec_delay(5);
1476 1686 }
1477 1687
1478 1688 /* Release if grant not acquired */
1479 1689 if (!(eec & IXGBE_EEC_GNT)) {
1480 1690 eec &= ~IXGBE_EEC_REQ;
1481 1691 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1482 1692 DEBUGOUT("Could not acquire EEPROM grant\n");
1483 1693
1484 1694 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1485 1695 status = IXGBE_ERR_EEPROM;
1486 1696 }
1487 1697
1488 1698 /* Setup EEPROM for Read/Write */
1489 1699 if (status == IXGBE_SUCCESS) {
1490 1700 /* Clear CS and SK */
1491 1701 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1492 1702 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1493 1703 IXGBE_WRITE_FLUSH(hw);
1494 1704 usec_delay(1);
1495 1705 }
1496 1706 }
1497 1707 return status;
1498 1708 }
1499 1709
/**
 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method.
 * Two semaphores are taken in order: SMBI (between device drivers) and then
 * SWESMBI (between software and firmware). On failure both are released.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	DEBUGFUNC("ixgbe_get_eeprom_semaphore");


	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = IXGBE_SUCCESS;
			break;
		}
		usec_delay(50);
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
			 "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		usec_delay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = IXGBE_SUCCESS;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == IXGBE_SUCCESS) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			usec_delay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			DEBUGOUT("SWESMBI Software EEPROM semaphore "
				 "not granted.\n");
			/* releases both SWESMBI and the SMBI taken above */
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		DEBUGOUT("Software semaphore SMBI between device drivers "
			 "not granted.\n");
	}

	return status;
}
1589 1799
1590 1800 /**
1591 1801 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1592 1802 * @hw: pointer to hardware structure
1593 1803 *
1594 1804 * This function clears hardware semaphore bits.
1595 1805 **/
1596 1806 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1597 1807 {
1598 1808 u32 swsm;
1599 1809
1600 1810 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1601 1811
1602 1812 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1603 1813
1604 1814 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1605 1815 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1606 1816 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1607 1817 IXGBE_WRITE_FLUSH(hw);
1608 1818 }
1609 1819
1610 1820 /**
1611 1821 * ixgbe_ready_eeprom - Polls for EEPROM ready
1612 1822 * @hw: pointer to hardware structure
1613 1823 **/
1614 1824 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1615 1825 {
1616 1826 s32 status = IXGBE_SUCCESS;
1617 1827 u16 i;
1618 1828 u8 spi_stat_reg;
1619 1829
1620 1830 DEBUGFUNC("ixgbe_ready_eeprom");
1621 1831
1622 1832 /*
1623 1833 * Read "Status Register" repeatedly until the LSB is cleared. The
1624 1834 * EEPROM will signal that the command has been completed by clearing
1625 1835 * bit 0 of the internal status register. If it's not cleared within
1626 1836 * 5 milliseconds, then error out.
1627 1837 */
1628 1838 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1629 1839 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1630 1840 IXGBE_EEPROM_OPCODE_BITS);
1631 1841 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1632 1842 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1633 1843 break;
1634 1844
1635 1845 usec_delay(5);
1636 1846 ixgbe_standby_eeprom(hw);
1637 1847 };
1638 1848
1639 1849 /*
1640 1850 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1641 1851 * devices (and only 0-5mSec on 5V devices)
1642 1852 */
1643 1853 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1644 1854 DEBUGOUT("SPI EEPROM Status error\n");
1645 1855 status = IXGBE_ERR_EEPROM;
1646 1856 }
1647 1857
1648 1858 return status;
1649 1859 }
1650 1860
1651 1861 /**
1652 1862 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1653 1863 * @hw: pointer to hardware structure
1654 1864 **/
1655 1865 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1656 1866 {
1657 1867 u32 eec;
1658 1868
1659 1869 DEBUGFUNC("ixgbe_standby_eeprom");
1660 1870
1661 1871 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1662 1872
1663 1873 /* Toggle CS to flush commands */
1664 1874 eec |= IXGBE_EEC_CS;
1665 1875 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1666 1876 IXGBE_WRITE_FLUSH(hw);
1667 1877 usec_delay(1);
1668 1878 eec &= ~IXGBE_EEC_CS;
1669 1879 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1670 1880 IXGBE_WRITE_FLUSH(hw);
1671 1881 usec_delay(1);
1672 1882 }
1673 1883
1674 1884 /**
1675 1885 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1676 1886 * @hw: pointer to hardware structure
1677 1887 * @data: data to send to the EEPROM
1678 1888 * @count: number of bits to shift out
1679 1889 **/
1680 1890 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1681 1891 u16 count)
1682 1892 {
1683 1893 u32 eec;
1684 1894 u32 mask;
1685 1895 u32 i;
1686 1896
1687 1897 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1688 1898
1689 1899 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1690 1900
1691 1901 /*
1692 1902 * Mask is used to shift "count" bits of "data" out to the EEPROM
1693 1903 * one bit at a time. Determine the starting bit based on count
1694 1904 */
1695 1905 mask = 0x01 << (count - 1);
1696 1906
1697 1907 for (i = 0; i < count; i++) {
1698 1908 /*
1699 1909 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1700 1910 * "1", and then raising and then lowering the clock (the SK
1701 1911 * bit controls the clock input to the EEPROM). A "0" is
1702 1912 * shifted out to the EEPROM by setting "DI" to "0" and then
1703 1913 * raising and then lowering the clock.
1704 1914 */
1705 1915 if (data & mask)
1706 1916 eec |= IXGBE_EEC_DI;
1707 1917 else
1708 1918 eec &= ~IXGBE_EEC_DI;
1709 1919
1710 1920 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1711 1921 IXGBE_WRITE_FLUSH(hw);
1712 1922
1713 1923 usec_delay(1);
1714 1924
1715 1925 ixgbe_raise_eeprom_clk(hw, &eec);
1716 1926 ixgbe_lower_eeprom_clk(hw, &eec);
1717 1927
1718 1928 /*
1719 1929 * Shift mask to signify next bit of data to shift in to the
1720 1930 * EEPROM
1721 1931 */
1722 1932 mask = mask >> 1;
1723 1933 };
1724 1934
1725 1935 /* We leave the "DI" bit set to "0" when we leave this routine. */
1726 1936 eec &= ~IXGBE_EEC_DI;
1727 1937 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1728 1938 IXGBE_WRITE_FLUSH(hw);
1729 1939 }
1730 1940
1731 1941 /**
1732 1942 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1733 1943 * @hw: pointer to hardware structure
1734 1944 **/
1735 1945 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1736 1946 {
1737 1947 u32 eec;
1738 1948 u32 i;
1739 1949 u16 data = 0;
1740 1950
1741 1951 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1742 1952
1743 1953 /*
1744 1954 * In order to read a register from the EEPROM, we need to shift
1745 1955 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1746 1956 * the clock input to the EEPROM (setting the SK bit), and then reading
1747 1957 * the value of the "DO" bit. During this "shifting in" process the
1748 1958 * "DI" bit should always be clear.
1749 1959 */
1750 1960 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1751 1961
1752 1962 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1753 1963
1754 1964 for (i = 0; i < count; i++) {
1755 1965 data = data << 1;
1756 1966 ixgbe_raise_eeprom_clk(hw, &eec);
1757 1967
1758 1968 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1759 1969
1760 1970 eec &= ~(IXGBE_EEC_DI);
1761 1971 if (eec & IXGBE_EEC_DO)
1762 1972 data |= 1;
1763 1973
1764 1974 ixgbe_lower_eeprom_clk(hw, &eec);
1765 1975 }
1766 1976
1767 1977 return data;
1768 1978 }
1769 1979
1770 1980 /**
1771 1981 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1772 1982 * @hw: pointer to hardware structure
1773 1983 * @eec: EEC register's current value
1774 1984 **/
1775 1985 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1776 1986 {
1777 1987 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1778 1988
1779 1989 /*
1780 1990 * Raise the clock input to the EEPROM
1781 1991 * (setting the SK bit), then delay
1782 1992 */
1783 1993 *eec = *eec | IXGBE_EEC_SK;
1784 1994 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1785 1995 IXGBE_WRITE_FLUSH(hw);
1786 1996 usec_delay(1);
1787 1997 }
1788 1998
/**
 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 *  @hw: pointer to hardware structure
 *  @eec: EEC register's current value; updated with SK cleared on return
 *        (was documented as "@eecd" — the parameter is named eec)
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	DEBUGFUNC("ixgbe_lower_eeprom_clk");

	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(1);
}
1807 2017
1808 2018 /**
1809 2019 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1810 2020 * @hw: pointer to hardware structure
1811 2021 **/
1812 2022 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1813 2023 {
1814 2024 u32 eec;
1815 2025
1816 2026 DEBUGFUNC("ixgbe_release_eeprom");
1817 2027
1818 2028 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1819 2029
1820 2030 eec |= IXGBE_EEC_CS; /* Pull CS high */
1821 2031 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1822 2032
1823 2033 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1824 2034 IXGBE_WRITE_FLUSH(hw);
1825 2035
1826 2036 usec_delay(1);
1827 2037
1828 2038 /* Stop requesting EEPROM access */
1829 2039 eec &= ~IXGBE_EEC_REQ;
1830 2040 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1831 2041
1832 2042 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1833 2043
1834 2044 /* Delay before attempt to obtain semaphore again to allow FW access */
1835 2045 msec_delay(hw->eeprom.semaphore_delay);
1836 2046 }
1837 2047
1838 2048 /**
1839 2049 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1840 2050 * @hw: pointer to hardware structure
1841 2051 **/
1842 2052 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1843 2053 {
1844 2054 u16 i;
1845 2055 u16 j;
1846 2056 u16 checksum = 0;
1847 2057 u16 length = 0;
1848 2058 u16 pointer = 0;
1849 2059 u16 word = 0;
1850 2060
1851 2061 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1852 2062
1853 2063 /* Include 0x0-0x3F in the checksum */
1854 2064 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1855 2065 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1856 2066 DEBUGOUT("EEPROM read failed\n");
1857 2067 break;
1858 2068 }
1859 2069 checksum += word;
1860 2070 }
1861 2071
1862 2072 /* Include all data from pointers except for the fw pointer */
1863 2073 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1864 2074 hw->eeprom.ops.read(hw, i, &pointer);
1865 2075
1866 2076 /* Make sure the pointer seems valid */
1867 2077 if (pointer != 0xFFFF && pointer != 0) {
1868 2078 hw->eeprom.ops.read(hw, pointer, &length);
1869 2079
1870 2080 if (length != 0xFFFF && length != 0) {
1871 2081 for (j = pointer+1; j <= pointer+length; j++) {
1872 2082 hw->eeprom.ops.read(hw, j, &word);
1873 2083 checksum += word;
1874 2084 }
1875 2085 }
1876 2086 }
1877 2087 }
1878 2088
1879 2089 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1880 2090
1881 2091 return checksum;
1882 2092 }
1883 2093
1884 2094 /**
1885 2095 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1886 2096 * @hw: pointer to hardware structure
1887 2097 * @checksum_val: calculated checksum
1888 2098 *
1889 2099 * Performs checksum calculation and validates the EEPROM checksum. If the
1890 2100 * caller does not need checksum_val, the value can be NULL.
1891 2101 **/
1892 2102 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1893 2103 u16 *checksum_val)
1894 2104 {
1895 2105 s32 status;
1896 2106 u16 checksum;
1897 2107 u16 read_checksum = 0;
1898 2108
1899 2109 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1900 2110
1901 2111 /*
1902 2112 * Read the first word from the EEPROM. If this times out or fails, do
1903 2113 * not continue or we could be in for a very long wait while every
1904 2114 * EEPROM read fails
1905 2115 */
1906 2116 status = hw->eeprom.ops.read(hw, 0, &checksum);
1907 2117
1908 2118 if (status == IXGBE_SUCCESS) {
1909 2119 checksum = hw->eeprom.ops.calc_checksum(hw);
1910 2120
1911 2121 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1912 2122
1913 2123 /*
1914 2124 * Verify read checksum from EEPROM is the same as
1915 2125 * calculated checksum
1916 2126 */
1917 2127 if (read_checksum != checksum)
1918 2128 status = IXGBE_ERR_EEPROM_CHECKSUM;
1919 2129
1920 2130 /* If the user cares, return the calculated checksum */
1921 2131 if (checksum_val)
1922 2132 *checksum_val = checksum;
1923 2133 } else {
1924 2134 DEBUGOUT("EEPROM read failed\n");
1925 2135 }
1926 2136
1927 2137 return status;
1928 2138 }
1929 2139
1930 2140 /**
1931 2141 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1932 2142 * @hw: pointer to hardware structure
1933 2143 **/
1934 2144 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1935 2145 {
1936 2146 s32 status;
1937 2147 u16 checksum;
1938 2148
1939 2149 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1940 2150
1941 2151 /*
1942 2152 * Read the first word from the EEPROM. If this times out or fails, do
1943 2153 * not continue or we could be in for a very long wait while every
1944 2154 * EEPROM read fails
1945 2155 */
1946 2156 status = hw->eeprom.ops.read(hw, 0, &checksum);
1947 2157
1948 2158 if (status == IXGBE_SUCCESS) {
1949 2159 checksum = hw->eeprom.ops.calc_checksum(hw);
1950 2160 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1951 2161 checksum);
1952 2162 } else {
1953 2163 DEBUGOUT("EEPROM read failed\n");
1954 2164 }
1955 2165
1956 2166 return status;
1957 2167 }
1958 2168
1959 2169 /**
1960 2170 * ixgbe_validate_mac_addr - Validate MAC address
1961 2171 * @mac_addr: pointer to MAC address.
1962 2172 *
1963 2173 * Tests a MAC address to ensure it is a valid Individual Address
1964 2174 **/
1965 2175 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1966 2176 {
1967 2177 s32 status = IXGBE_SUCCESS;
1968 2178
1969 2179 DEBUGFUNC("ixgbe_validate_mac_addr");
1970 2180
1971 2181 /* Make sure it is not a multicast address */
1972 2182 if (IXGBE_IS_MULTICAST(mac_addr)) {
1973 2183 DEBUGOUT("MAC address is multicast\n");
1974 2184 status = IXGBE_ERR_INVALID_MAC_ADDR;
1975 2185 /* Not a broadcast address */
1976 2186 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1977 2187 DEBUGOUT("MAC address is broadcast\n");
1978 2188 status = IXGBE_ERR_INVALID_MAC_ADDR;
1979 2189 /* Reject the zero address */
1980 2190 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1981 2191 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1982 2192 DEBUGOUT("MAC address is all zeros\n");
1983 2193 status = IXGBE_ERR_INVALID_MAC_ADDR;
1984 2194 }
1985 2195 return status;
1986 2196 }
1987 2197
1988 2198 /**
1989 2199 * ixgbe_set_rar_generic - Set Rx address register
1990 2200 * @hw: pointer to hardware structure
1991 2201 * @index: Receive address register to write
1992 2202 * @addr: Address to put into receive address register
1993 2203 * @vmdq: VMDq "set" or "pool" index
1994 2204 * @enable_addr: set flag that address is active
1995 2205 *
1996 2206 * Puts an ethernet address into a receive address register.
1997 2207 **/
1998 2208 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1999 2209 u32 enable_addr)
2000 2210 {
2001 2211 u32 rar_low, rar_high;
2002 2212 u32 rar_entries = hw->mac.num_rar_entries;
2003 2213
2004 2214 DEBUGFUNC("ixgbe_set_rar_generic");
2005 2215
2006 2216 /* Make sure we are using a valid rar index range */
2007 2217 if (index >= rar_entries) {
2008 2218 DEBUGOUT1("RAR index %d is out of range.\n", index);
2009 2219 return IXGBE_ERR_INVALID_ARGUMENT;
2010 2220 }
2011 2221
2012 2222 /* setup VMDq pool selection before this RAR gets enabled */
2013 2223 hw->mac.ops.set_vmdq(hw, index, vmdq);
2014 2224
2015 2225 /*
2016 2226 * HW expects these in little endian so we reverse the byte
2017 2227 * order from network order (big endian) to little endian
2018 2228 */
2019 2229 rar_low = ((u32)addr[0] |
2020 2230 ((u32)addr[1] << 8) |
2021 2231 ((u32)addr[2] << 16) |
2022 2232 ((u32)addr[3] << 24));
2023 2233 /*
2024 2234 * Some parts put the VMDq setting in the extra RAH bits,
2025 2235 * so save everything except the lower 16 bits that hold part
2026 2236 * of the address and the address valid bit.
2027 2237 */
2028 2238 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2029 2239 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2030 2240 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2031 2241
2032 2242 if (enable_addr != 0)
2033 2243 rar_high |= IXGBE_RAH_AV;
2034 2244
2035 2245 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2036 2246 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2037 2247
2038 2248 return IXGBE_SUCCESS;
2039 2249 }
2040 2250
2041 2251 /**
2042 2252 * ixgbe_clear_rar_generic - Remove Rx address register
2043 2253 * @hw: pointer to hardware structure
2044 2254 * @index: Receive address register to write
2045 2255 *
2046 2256 * Clears an ethernet address from a receive address register.
2047 2257 **/
2048 2258 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2049 2259 {
2050 2260 u32 rar_high;
2051 2261 u32 rar_entries = hw->mac.num_rar_entries;
2052 2262
2053 2263 DEBUGFUNC("ixgbe_clear_rar_generic");
2054 2264
2055 2265 /* Make sure we are using a valid rar index range */
2056 2266 if (index >= rar_entries) {
2057 2267 DEBUGOUT1("RAR index %d is out of range.\n", index);
2058 2268 return IXGBE_ERR_INVALID_ARGUMENT;
2059 2269 }
2060 2270
2061 2271 /*
2062 2272 * Some parts put the VMDq setting in the extra RAH bits,
2063 2273 * so save everything except the lower 16 bits that hold part
2064 2274 * of the address and the address valid bit.
2065 2275 */
2066 2276 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2067 2277 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2068 2278
2069 2279 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2070 2280 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2071 2281
2072 2282 /* clear VMDq pool/queue selection for this RAR */
2073 2283 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2074 2284
2075 2285 return IXGBE_SUCCESS;
2076 2286 }
2077 2287
/**
 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 *  @hw: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 *
 *  Returns the result of ixgbe_init_uta_tables() (expected IXGBE_SUCCESS).
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR0 (the primary address) is the only entry in use after init */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	/* Program the hash-selection (MO) field; MFE stays disabled here */
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Should always be IXGBE_SUCCESS. */
	return ixgbe_init_uta_tables(hw);
}
2144 2354
/**
 *  ixgbe_add_uc_addr - Adds a secondary unicast address.
 *  @hw: pointer to hardware structure
 *  @addr: new address
 *  @vmdq: VMDq "set" or "pool" index for this address
 *
 *  Adds it to unused receive address register or goes into promiscuous mode.
 **/
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar;

	DEBUGFUNC("ixgbe_add_uc_addr");

	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	/*
	 * Place this address in the RAR if there is room,
	 * else put the controller into promiscuous mode
	 */
	if (hw->addr_ctrl.rar_used_count < rar_entries) {
		rar = hw->addr_ctrl.rar_used_count;
		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
		hw->addr_ctrl.rar_used_count++;
	} else {
		/*
		 * No free RAR entry; record the overflow so the caller can
		 * enable unicast promiscuous mode instead.
		 */
		hw->addr_ctrl.overflow_promisc++;
	}

	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
}
2177 2387
/**
 *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 *  @hw: pointer to hardware structure
 *  @addr_list: the list of new addresses
 *  @addr_count: number of addresses
 *  @next: iterator function to walk the address list
 *
 *  The given list replaces any existing list. Clears the secondary addrs from
 *  receive address registers. Uses unused receive address registers for the
 *  first secondary addresses, and falls back to promiscuous mode as needed.
 *
 *  Drivers using secondary unicast addresses must set user_set_promisc when
 *  manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 *
	 * NOTE(review): assumes rar_used_count >= 1 (RAR0 always in use,
	 * as established by ixgbe_init_rx_addrs_generic); if it were 0
	 * the subtraction below would wrap — confirm callers initialize
	 * the address filters first.
	 */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	/* ixgbe_add_uc_addr bumps overflow_promisc when RARs run out */
	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2247 2457
2248 2458 /**
2249 2459 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2250 2460 * @hw: pointer to hardware structure
2251 2461 * @mc_addr: the multicast address
2252 2462 *
2253 2463 * Extracts the 12 bits, from a multicast address, to determine which
2254 2464 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2255 2465 * incoming rx multicast addresses, to determine the bit-vector to check in
2256 2466 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2257 2467 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2258 2468 * to mc_filter_type.
2259 2469 **/
2260 2470 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2261 2471 {
2262 2472 u32 vector = 0;
2263 2473
2264 2474 DEBUGFUNC("ixgbe_mta_vector");
2265 2475
2266 2476 switch (hw->mac.mc_filter_type) {
2267 2477 case 0: /* use bits [47:36] of the address */
2268 2478 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2269 2479 break;
2270 2480 case 1: /* use bits [46:35] of the address */
2271 2481 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2272 2482 break;
2273 2483 case 2: /* use bits [45:34] of the address */
2274 2484 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2275 2485 break;
2276 2486 case 3: /* use bits [43:32] of the address */
2277 2487 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2278 2488 break;
2279 2489 default: /* Invalid mc_filter_type */
2280 2490 DEBUGOUT("MC filter type param set incorrectly\n");
2281 2491 ASSERT(0);
2282 2492 break;
2283 2493 }
2284 2494
2285 2495 /* vector can only be 12-bits or boundary will be exceeded */
2286 2496 vector &= 0xFFF;
2287 2497 return vector;
2288 2498 }
2289 2499
/**
 *  ixgbe_set_mta - Set bit-vector in multicast table
 *  @hw: pointer to hardware structure
 *  @mc_addr: multicast address whose hash bit should be set
 *            (was documented as "@hash_value" — the parameter is mc_addr)
 *
 *  Sets the bit-vector in the multicast table shadow (hw->mac.mta_shadow);
 *  writing the shadow out to the MTA registers is left to the caller.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
2323 2533
2324 2534 /**
2325 2535 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2326 2536 * @hw: pointer to hardware structure
2327 2537 * @mc_addr_list: the list of new multicast addresses
2328 2538 * @mc_addr_count: number of addresses
2329 2539 * @next: iterator function to walk the multicast address list
2330 2540 * @clear: flag, when set clears the table beforehand
2331 2541 *
2332 2542 * When the clear flag is set, the given list replaces any existing list.
2333 2543 * Hashes the given addresses into the multicast table.
2334 2544 **/
2335 2545 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2336 2546 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2337 2547 bool clear)
2338 2548 {
2339 2549 u32 i;
2340 2550 u32 vmdq;
2341 2551
2342 2552 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2343 2553
2344 2554 /*
2345 2555 * Set the new number of MC addresses that we are being requested to
2346 2556 * use.
2347 2557 */
2348 2558 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2349 2559 hw->addr_ctrl.mta_in_use = 0;
2350 2560
2351 2561 /* Clear mta_shadow */
2352 2562 if (clear) {
2353 2563 DEBUGOUT(" Clearing MTA\n");
2354 2564 (void) memset(&hw->mac.mta_shadow, 0,
2355 2565 sizeof(hw->mac.mta_shadow));
2356 2566 }
2357 2567
2358 2568 /* Update mta_shadow */
2359 2569 for (i = 0; i < mc_addr_count; i++) {
2360 2570 DEBUGOUT(" Adding the multicast addresses:\n");
2361 2571 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2362 2572 }
2363 2573
2364 2574 /* Enable mta */
2365 2575 for (i = 0; i < hw->mac.mcft_size; i++)
2366 2576 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2367 2577 hw->mac.mta_shadow[i]);
2368 2578
2369 2579 if (hw->addr_ctrl.mta_in_use > 0)
2370 2580 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2371 2581 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2372 2582
2373 2583 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2374 2584 return IXGBE_SUCCESS;
2375 2585 }
2376 2586
2377 2587 /**
2378 2588 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2379 2589 * @hw: pointer to hardware structure
2380 2590 *
2381 2591 * Enables multicast address in RAR and the use of the multicast hash table.
2382 2592 **/
2383 2593 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2384 2594 {
2385 2595 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2386 2596
2387 2597 DEBUGFUNC("ixgbe_enable_mc_generic");
2388 2598
2389 2599 if (a->mta_in_use > 0)
2390 2600 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2391 2601 hw->mac.mc_filter_type);
2392 2602
2393 2603 return IXGBE_SUCCESS;
2394 2604 }
2395 2605
2396 2606 /**
2397 2607 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2398 2608 * @hw: pointer to hardware structure
2399 2609 *
2400 2610 * Disables multicast address in RAR and the use of the multicast hash table.
2401 2611 **/
2402 2612 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2403 2613 {
2404 2614 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2405 2615
2406 2616 DEBUGFUNC("ixgbe_disable_mc_generic");
2407 2617
2408 2618 if (a->mta_in_use > 0)
2409 2619 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2410 2620
2411 2621 return IXGBE_SUCCESS;
2412 2622 }
2413 2623
2414 2624 /**
2415 2625 * ixgbe_fc_enable_generic - Enable flow control
2416 2626 * @hw: pointer to hardware structure
2417 2627 *
2418 2628 * Enable flow control according to the current settings.
2419 2629 **/
2420 2630 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2421 2631 {
2422 2632 s32 ret_val = IXGBE_SUCCESS;
2423 2633 u32 mflcn_reg, fccfg_reg;
2424 2634 u32 reg;
2425 2635 u32 fcrtl, fcrth;
2426 2636 int i;
2427 2637
2428 2638 DEBUGFUNC("ixgbe_fc_enable_generic");
2429 2639
2430 2640 /* Validate the water mark configuration */
2431 2641 if (!hw->fc.pause_time) {
2432 2642 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2433 2643 goto out;
2434 2644 }
2435 2645
2436 2646 /* Low water mark of zero causes XOFF floods */
2437 2647 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2438 2648 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2439 2649 hw->fc.high_water[i]) {
2440 2650 if (!hw->fc.low_water[i] ||
2441 2651 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2442 2652 DEBUGOUT("Invalid water mark configuration\n");
2443 2653 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2444 2654 goto out;
2445 2655 }
2446 2656 }
2447 2657 }
2448 2658
2449 2659 /* Negotiate the fc mode to use */
2450 2660 ixgbe_fc_autoneg(hw);
2451 2661
2452 2662 /* Disable any previous flow control settings */
2453 2663 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2454 2664 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2455 2665
2456 2666 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2457 2667 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2458 2668
2459 2669 /*
2460 2670 * The possible values of fc.current_mode are:
2461 2671 * 0: Flow control is completely disabled
2462 2672 * 1: Rx flow control is enabled (we can receive pause frames,
2463 2673 * but not send pause frames).
2464 2674 * 2: Tx flow control is enabled (we can send pause frames but
2465 2675 * we do not support receiving pause frames).
2466 2676 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2467 2677 * other: Invalid.
2468 2678 */
2469 2679 switch (hw->fc.current_mode) {
2470 2680 case ixgbe_fc_none:
2471 2681 /*
2472 2682 * Flow control is disabled by software override or autoneg.
2473 2683 * The code below will actually disable it in the HW.
2474 2684 */
2475 2685 break;
2476 2686 case ixgbe_fc_rx_pause:
2477 2687 /*
2478 2688 * Rx Flow control is enabled and Tx Flow control is
2479 2689 * disabled by software override. Since there really
2480 2690 * isn't a way to advertise that we are capable of RX
2481 2691 * Pause ONLY, we will advertise that we support both
2482 2692 * symmetric and asymmetric Rx PAUSE. Later, we will
2483 2693 * disable the adapter's ability to send PAUSE frames.
2484 2694 */
2485 2695 mflcn_reg |= IXGBE_MFLCN_RFCE;
2486 2696 break;
2487 2697 case ixgbe_fc_tx_pause:
2488 2698 /*
2489 2699 * Tx Flow control is enabled, and Rx Flow control is
2490 2700 * disabled by software override.
2491 2701 */
2492 2702 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2493 2703 break;
2494 2704 case ixgbe_fc_full:
2495 2705 /* Flow control (both Rx and Tx) is enabled by SW override. */
2496 2706 mflcn_reg |= IXGBE_MFLCN_RFCE;
2497 2707 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2498 2708 break;
2499 2709 default:
2500 2710 DEBUGOUT("Flow control param set incorrectly\n");
2501 2711 ret_val = IXGBE_ERR_CONFIG;
2502 2712 goto out;
2503 2713 }
2504 2714
2505 2715 /* Set 802.3x based flow control settings. */
2506 2716 mflcn_reg |= IXGBE_MFLCN_DPF;
2507 2717 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2508 2718 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2509 2719
2510 2720
2511 2721 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2512 2722 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2513 2723 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2514 2724 hw->fc.high_water[i]) {
2515 2725 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2516 2726 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2517 2727 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2518 2728 } else {
2519 2729 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2520 2730 /*
2521 2731 * In order to prevent Tx hangs when the internal Tx
2522 2732 * switch is enabled we must set the high water mark
2523 2733 * to the maximum FCRTH value. This allows the Tx
2524 2734 * switch to function even under heavy Rx workloads.
2525 2735 */
2526 2736 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2527 2737 }
2528 2738
2529 2739 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2530 2740 }
2531 2741
2532 2742 /* Configure pause time (2 TCs per register) */
2533 2743 reg = hw->fc.pause_time * 0x00010001;
2534 2744 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2535 2745 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2536 2746
2537 2747 /* Configure flow control refresh threshold value */
2538 2748 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2539 2749
2540 2750 out:
2541 2751 return ret_val;
2542 2752 }
2543 2753
2544 2754 /**
2545 2755 * ixgbe_negotiate_fc - Negotiate flow control
2546 2756 * @hw: pointer to hardware structure
2547 2757 * @adv_reg: flow control advertised settings
2548 2758 * @lp_reg: link partner's flow control settings
2549 2759 * @adv_sym: symmetric pause bit in advertisement
2550 2760 * @adv_asm: asymmetric pause bit in advertisement
2551 2761 * @lp_sym: symmetric pause bit in link partner advertisement
2552 2762 * @lp_asm: asymmetric pause bit in link partner advertisement
2553 2763 *
2554 2764 * Find the intersection between advertised settings and link partner's
2555 2765 * advertised settings
2556 2766 **/
2557 2767 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2558 2768 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2559 2769 {
2560 2770 if ((!(adv_reg)) || (!(lp_reg)))
2561 2771 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2562 2772
2563 2773 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2564 2774 /*
2565 2775 * Now we need to check if the user selected Rx ONLY
2566 2776 * of pause frames. In this case, we had to advertise
2567 2777 * FULL flow control because we could not advertise RX
2568 2778 * ONLY. Hence, we must now check to see if we need to
2569 2779 * turn OFF the TRANSMISSION of PAUSE frames.
2570 2780 */
2571 2781 if (hw->fc.requested_mode == ixgbe_fc_full) {
2572 2782 hw->fc.current_mode = ixgbe_fc_full;
2573 2783 DEBUGOUT("Flow Control = FULL.\n");
2574 2784 } else {
2575 2785 hw->fc.current_mode = ixgbe_fc_rx_pause;
2576 2786 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2577 2787 }
2578 2788 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2579 2789 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2580 2790 hw->fc.current_mode = ixgbe_fc_tx_pause;
2581 2791 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2582 2792 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2583 2793 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2584 2794 hw->fc.current_mode = ixgbe_fc_rx_pause;
2585 2795 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2586 2796 } else {
2587 2797 hw->fc.current_mode = ixgbe_fc_none;
2588 2798 DEBUGOUT("Flow Control = NONE.\n");
2589 2799 }
2590 2800 return IXGBE_SUCCESS;
2591 2801 }
2592 2802
2593 2803 /**
2594 2804 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2595 2805 * @hw: pointer to hardware structure
2596 2806 *
2597 2807 * Enable flow control according on 1 gig fiber.
2598 2808 **/
2599 2809 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2600 2810 {
2601 2811 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2602 2812 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2603 2813
2604 2814 /*
2605 2815 * On multispeed fiber at 1g, bail out if
2606 2816 * - link is up but AN did not complete, or if
2607 2817 * - link is up and AN completed but timed out
2608 2818 */
2609 2819
2610 2820 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2611 2821 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2612 2822 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2613 2823 goto out;
2614 2824
2615 2825 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2616 2826 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2617 2827
2618 2828 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2619 2829 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2620 2830 IXGBE_PCS1GANA_ASM_PAUSE,
2621 2831 IXGBE_PCS1GANA_SYM_PAUSE,
2622 2832 IXGBE_PCS1GANA_ASM_PAUSE);
2623 2833
2624 2834 out:
2625 2835 return ret_val;
2626 2836 }
2627 2837
2628 2838 /**
2629 2839 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2630 2840 * @hw: pointer to hardware structure
2631 2841 *
2632 2842 * Enable flow control according to IEEE clause 37.
2633 2843 **/
2634 2844 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2635 2845 {
2636 2846 u32 links2, anlp1_reg, autoc_reg, links;
2637 2847 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2638 2848
2639 2849 /*
2640 2850 * On backplane, bail out if
2641 2851 * - backplane autoneg was not completed, or if
2642 2852 * - we are 82599 and link partner is not AN enabled
2643 2853 */
2644 2854 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2645 2855 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2646 2856 goto out;
2647 2857
2648 2858 if (hw->mac.type == ixgbe_mac_82599EB) {
2649 2859 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2650 2860 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2651 2861 goto out;
2652 2862 }
2653 2863 /*
2654 2864 * Read the 10g AN autoc and LP ability registers and resolve
2655 2865 * local flow control settings accordingly
2656 2866 */
2657 2867 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2658 2868 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2659 2869
2660 2870 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2661 2871 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2662 2872 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2663 2873
2664 2874 out:
2665 2875 return ret_val;
2666 2876 }
2667 2877
2668 2878 /**
2669 2879 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2670 2880 * @hw: pointer to hardware structure
2671 2881 *
2672 2882 * Enable flow control according to IEEE clause 37.
2673 2883 **/
2674 2884 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2675 2885 {
2676 2886 u16 technology_ability_reg = 0;
2677 2887 u16 lp_technology_ability_reg = 0;
2678 2888
2679 2889 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2680 2890 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2681 2891 &technology_ability_reg);
2682 2892 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2683 2893 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2684 2894 &lp_technology_ability_reg);
2685 2895
2686 2896 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2687 2897 (u32)lp_technology_ability_reg,
2688 2898 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2689 2899 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2690 2900 }
2691 2901
2692 2902 /**
2693 2903 * ixgbe_fc_autoneg - Configure flow control
2694 2904 * @hw: pointer to hardware structure
2695 2905 *
2696 2906 * Compares our advertised flow control capabilities to those advertised by
2697 2907 * our link partner, and determines the proper flow control mode to use.
2698 2908 **/
2699 2909 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2700 2910 {
2701 2911 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2702 2912 ixgbe_link_speed speed;
2703 2913 bool link_up;
2704 2914
2705 2915 DEBUGFUNC("ixgbe_fc_autoneg");
2706 2916
2707 2917 /*
2708 2918 * AN should have completed when the cable was plugged in.
2709 2919 * Look for reasons to bail out. Bail out if:
2710 2920 * - FC autoneg is disabled, or if
2711 2921 * - link is not up.
|
↓ open down ↓ |
1429 lines elided |
↑ open up ↑ |
2712 2922 */
2713 2923 if (hw->fc.disable_fc_autoneg)
2714 2924 goto out;
2715 2925
2716 2926 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2717 2927 if (!link_up)
2718 2928 goto out;
2719 2929
2720 2930 switch (hw->phy.media_type) {
2721 2931 /* Autoneg flow control on fiber adapters */
2932 + case ixgbe_media_type_fiber_fixed:
2722 2933 case ixgbe_media_type_fiber:
2723 2934 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2724 2935 ret_val = ixgbe_fc_autoneg_fiber(hw);
2725 2936 break;
2726 2937
2727 2938 /* Autoneg flow control on backplane adapters */
2728 2939 case ixgbe_media_type_backplane:
2729 2940 ret_val = ixgbe_fc_autoneg_backplane(hw);
2730 2941 break;
2731 2942
2732 2943 /* Autoneg flow control on copper adapters */
2733 2944 case ixgbe_media_type_copper:
2734 2945 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2735 2946 ret_val = ixgbe_fc_autoneg_copper(hw);
2736 2947 break;
2737 2948
2738 2949 default:
2739 2950 break;
2740 2951 }
2741 2952
2742 2953 out:
2743 2954 if (ret_val == IXGBE_SUCCESS) {
2744 2955 hw->fc.fc_was_autonegged = TRUE;
2745 2956 } else {
2746 2957 hw->fc.fc_was_autonegged = FALSE;
2747 2958 hw->fc.current_mode = hw->fc.requested_mode;
2748 2959 }
2749 2960 }
2750 2961
2751 2962 /**
2752 2963 * ixgbe_disable_pcie_master - Disable PCI-express master access
2753 2964 * @hw: pointer to hardware structure
2754 2965 *
2755 2966 * Disables PCI-Express master access and verifies there are no pending
2756 2967 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2757 2968 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2758 2969 * is returned signifying master requests disabled.
2759 2970 **/
2760 2971 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2761 2972 {
2762 2973 s32 status = IXGBE_SUCCESS;
2763 2974 u32 i;
2764 2975
2765 2976 DEBUGFUNC("ixgbe_disable_pcie_master");
2766 2977
2767 2978 /* Always set this bit to ensure any future transactions are blocked */
2768 2979 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2769 2980
2770 2982 	/* Exit if master requests are blocked */
2771 2982 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2772 2983 goto out;
2773 2984
2774 2985 /* Poll for master request bit to clear */
2775 2986 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2776 2987 usec_delay(100);
2777 2988 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2778 2989 goto out;
2779 2990 }
2780 2991
2781 2992 /*
2782 2993 * Two consecutive resets are required via CTRL.RST per datasheet
2783 2994 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2784 2995 * of this need. The first reset prevents new master requests from
2785 2996 * being issued by our device. We then must wait 1usec or more for any
2786 2997 * remaining completions from the PCIe bus to trickle in, and then reset
2787 2998 * again to clear out any effects they may have had on our device.
2788 2999 */
2789 3000 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2790 3001 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2791 3002
2792 3003 /*
2793 3004 * Before proceeding, make sure that the PCIe block does not have
2794 3005 * transactions pending.
2795 3006 */
2796 3007 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2797 3008 usec_delay(100);
2798 3009 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2799 3010 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2800 3011 goto out;
2801 3012 }
2802 3013
2803 3014 DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2804 3015 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2805 3016
2806 3017 out:
2807 3018 return status;
2808 3019 }
2809 3020
2810 3021 /**
2811 3022 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2812 3023 * @hw: pointer to hardware structure
2813 3024 * @mask: Mask to specify which semaphore to acquire
2814 3025 *
2815 3026 * Acquires the SWFW semaphore through the GSSR register for the specified
2816 3027 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2817 3028 **/
2818 3029 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2819 3030 {
2820 3031 u32 gssr;
2821 3032 u32 swmask = mask;
2822 3033 u32 fwmask = mask << 5;
2823 3034 s32 timeout = 200;
2824 3035
2825 3036 DEBUGFUNC("ixgbe_acquire_swfw_sync");
2826 3037
2827 3038 while (timeout) {
2828 3039 /*
2829 3040 * SW EEPROM semaphore bit is used for access to all
2830 3041 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2831 3042 */
2832 3043 if (ixgbe_get_eeprom_semaphore(hw))
2833 3044 return IXGBE_ERR_SWFW_SYNC;
2834 3045
2835 3046 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2836 3047 if (!(gssr & (fwmask | swmask)))
2837 3048 break;
2838 3049
2839 3050 /*
2840 3051 * Firmware currently using resource (fwmask) or other software
2841 3052 * thread currently using resource (swmask)
2842 3053 */
2843 3054 ixgbe_release_eeprom_semaphore(hw);
2844 3055 msec_delay(5);
2845 3056 timeout--;
2846 3057 }
2847 3058
2848 3059 if (!timeout) {
2849 3060 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2850 3061 return IXGBE_ERR_SWFW_SYNC;
2851 3062 }
2852 3063
2853 3064 gssr |= swmask;
2854 3065 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2855 3066
2856 3067 ixgbe_release_eeprom_semaphore(hw);
2857 3068 return IXGBE_SUCCESS;
2858 3069 }
2859 3070
2860 3071 /**
2861 3072 * ixgbe_release_swfw_sync - Release SWFW semaphore
2862 3073 * @hw: pointer to hardware structure
2863 3074 * @mask: Mask to specify which semaphore to release
2864 3075 *
2865 3076 * Releases the SWFW semaphore through the GSSR register for the specified
2866 3077 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2867 3078 **/
2868 3079 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2869 3080 {
2870 3081 u32 gssr;
2871 3082 u32 swmask = mask;
2872 3083
2873 3084 DEBUGFUNC("ixgbe_release_swfw_sync");
2874 3085
2875 3086 (void) ixgbe_get_eeprom_semaphore(hw);
2876 3087
2877 3088 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2878 3089 gssr &= ~swmask;
2879 3090 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2880 3091
2881 3092 ixgbe_release_eeprom_semaphore(hw);
2882 3093 }
2883 3094
2884 3095 /**
2885 3096 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2886 3097 * @hw: pointer to hardware structure
2887 3098 *
2888 3099 * Stops the receive data path and waits for the HW to internally empty
2889 3100 * the Rx security block
2890 3101 **/
2891 3102 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2892 3103 {
2893 3104 #define IXGBE_MAX_SECRX_POLL 40
2894 3105
2895 3106 int i;
2896 3107 int secrxreg;
2897 3108
2898 3109 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2899 3110
2900 3111
2901 3112 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2902 3113 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2903 3114 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2904 3115 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2905 3116 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2906 3117 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2907 3118 break;
2908 3119 else
2909 3120 /* Use interrupt-safe sleep just in case */
2910 3121 usec_delay(1000);
2911 3122 }
2912 3123
2913 3124 /* For informational purposes only */
2914 3125 if (i >= IXGBE_MAX_SECRX_POLL)
2915 3126 DEBUGOUT("Rx unit being enabled before security "
2916 3127 "path fully disabled. Continuing with init.\n");
2917 3128
2918 3129 return IXGBE_SUCCESS;
2919 3130 }
2920 3131
2921 3132 /**
2922 3133 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2923 3134 * @hw: pointer to hardware structure
2924 3135 *
2925 3136 * Enables the receive data path.
2926 3137 **/
2927 3138 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2928 3139 {
2929 3140 int secrxreg;
2930 3141
2931 3142 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2932 3143
2933 3144 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2934 3145 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2935 3146 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2936 3147 IXGBE_WRITE_FLUSH(hw);
2937 3148
2938 3149 return IXGBE_SUCCESS;
2939 3150 }
2940 3151
2941 3152 /**
2942 3153 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2943 3154 * @hw: pointer to hardware structure
2944 3155 * @regval: register value to write to RXCTRL
2945 3156 *
2946 3157 * Enables the Rx DMA unit
2947 3158 **/
2948 3159 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2949 3160 {
2950 3161 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2951 3162
2952 3163 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2953 3164
2954 3165 return IXGBE_SUCCESS;
2955 3166 }
2956 3167
2957 3168 /**
|
↓ open down ↓ |
226 lines elided |
↑ open up ↑ |
2958 3169 * ixgbe_blink_led_start_generic - Blink LED based on index.
2959 3170 * @hw: pointer to hardware structure
2960 3171 * @index: led number to blink
2961 3172 **/
2962 3173 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2963 3174 {
2964 3175 ixgbe_link_speed speed = 0;
2965 3176 bool link_up = 0;
2966 3177 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2967 3178 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3179 + s32 ret_val = IXGBE_SUCCESS;
2968 3180
2969 3181 DEBUGFUNC("ixgbe_blink_led_start_generic");
2970 3182
2971 3183 /*
2972 3184 * Link must be up to auto-blink the LEDs;
2973 3185 * Force it if link is down.
2974 3186 */
2975 3187 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2976 3188
2977 3189 if (!link_up) {
3190 + /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3191 + * LESM is on.
3192 + */
3193 + bool got_lock = FALSE;
3194 + if ((hw->mac.type == ixgbe_mac_82599EB) &&
3195 + ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3196 + ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3197 + IXGBE_GSSR_MAC_CSR_SM);
3198 + if (ret_val != IXGBE_SUCCESS) {
3199 + ret_val = IXGBE_ERR_SWFW_SYNC;
3200 + goto out;
3201 + }
3202 + got_lock = TRUE;
3203 + }
3204 +
2978 3205 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2979 3206 autoc_reg |= IXGBE_AUTOC_FLU;
2980 3207 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2981 3208 IXGBE_WRITE_FLUSH(hw);
3209 +
3210 + if (got_lock)
3211 + hw->mac.ops.release_swfw_sync(hw,
3212 + IXGBE_GSSR_MAC_CSR_SM);
2982 3213 msec_delay(10);
2983 3214 }
2984 3215
2985 3216 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2986 3217 led_reg |= IXGBE_LED_BLINK(index);
2987 3218 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2988 3219 IXGBE_WRITE_FLUSH(hw);
2989 3220
2990 - return IXGBE_SUCCESS;
3221 +out:
3222 + return ret_val;
2991 3223 }
2992 3224
2993 3225 /**
2994 3226 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2995 3227 * @hw: pointer to hardware structure
2996 3228 * @index: led number to stop blinking
2997 3229 **/
2998 3230 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2999 3231 {
3000 3232 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3001 3233 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3234 + s32 ret_val = IXGBE_SUCCESS;
3235 + bool got_lock = FALSE;
3002 3236
3003 3237 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3238 + /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3239 + * LESM is on.
3240 + */
3241 + if ((hw->mac.type == ixgbe_mac_82599EB) &&
3242 + ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3243 + ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3244 + IXGBE_GSSR_MAC_CSR_SM);
3245 + if (ret_val != IXGBE_SUCCESS) {
3246 + ret_val = IXGBE_ERR_SWFW_SYNC;
3247 + goto out;
3248 + }
3249 + got_lock = TRUE;
3250 + }
3004 3251
3005 3252
3006 3253 autoc_reg &= ~IXGBE_AUTOC_FLU;
3007 3254 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3008 3255 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3009 3256
3257 + if (hw->mac.type == ixgbe_mac_82599EB)
3258 + (void) ixgbe_reset_pipeline_82599(hw);
3259 +
3260 + if (got_lock)
3261 + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3262 +
3010 3263 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3011 3264 led_reg &= ~IXGBE_LED_BLINK(index);
3012 3265 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3013 3266 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3014 3267 IXGBE_WRITE_FLUSH(hw);
3015 3268
3016 - return IXGBE_SUCCESS;
3269 +out:
3270 + return ret_val;
3017 3271 }
3018 3272
3019 3273 /**
3020 3274 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3021 3275 * @hw: pointer to hardware structure
3022 3276 * @san_mac_offset: SAN MAC address offset
3023 3277 *
3024 3278 * This function will read the EEPROM location for the SAN MAC address
3025 3279 * pointer, and returns the value at that location. This is used in both
3026 3280 * get and set mac_addr routines.
3027 3281 **/
3028 3282 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3029 3283 u16 *san_mac_offset)
3030 3284 {
3031 3285 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3032 3286
3033 3287 /*
3034 3288 * First read the EEPROM pointer to see if the MAC addresses are
3035 3289 * available.
3036 3290 */
3037 3291 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3038 3292
3039 3293 return IXGBE_SUCCESS;
3040 3294 }
3041 3295
3042 3296 /**
3043 3297 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3044 3298 * @hw: pointer to hardware structure
3045 3299 * @san_mac_addr: SAN MAC address
3046 3300 *
3047 3301 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3048 3302 * per-port, so set_lan_id() must be called before reading the addresses.
3049 3303 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3050 3304 * upon for non-SFP connections, so we must call it here.
3051 3305 **/
3052 3306 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3053 3307 {
3054 3308 u16 san_mac_data, san_mac_offset;
3055 3309 u8 i;
3056 3310
3057 3311 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3058 3312
3059 3313 /*
3060 3314 * First read the EEPROM pointer to see if the MAC addresses are
3061 3315 * available. If they're not, no point in calling set_lan_id() here.
3062 3316 */
3063 3317 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3064 3318
3065 3319 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3066 3320 /*
3067 3321 * No addresses available in this EEPROM. It's not an
3068 3322 * error though, so just wipe the local address and return.
3069 3323 */
3070 3324 for (i = 0; i < 6; i++)
3071 3325 san_mac_addr[i] = 0xFF;
3072 3326
3073 3327 goto san_mac_addr_out;
3074 3328 }
3075 3329
3076 3330 /* make sure we know which port we need to program */
3077 3331 hw->mac.ops.set_lan_id(hw);
3078 3332 /* apply the port offset to the address offset */
3079 3333 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3080 3334 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3081 3335 for (i = 0; i < 3; i++) {
3082 3336 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3083 3337 san_mac_addr[i * 2] = (u8)(san_mac_data);
3084 3338 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3085 3339 san_mac_offset++;
3086 3340 }
3087 3341
3088 3342 san_mac_addr_out:
3089 3343 return IXGBE_SUCCESS;
3090 3344 }
3091 3345
3092 3346 /**
3093 3347 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3094 3348 * @hw: pointer to hardware structure
3095 3349 * @san_mac_addr: SAN MAC address
3096 3350 *
3097 3351 * Write a SAN MAC address to the EEPROM.
3098 3352 **/
3099 3353 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3100 3354 {
3101 3355 s32 status = IXGBE_SUCCESS;
3102 3356 u16 san_mac_data, san_mac_offset;
3103 3357 u8 i;
3104 3358
3105 3359 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3106 3360
3107 3361 /* Look for SAN mac address pointer. If not defined, return */
3108 3362 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3109 3363
3110 3364 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3111 3365 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3112 3366 goto san_mac_addr_out;
3113 3367 }
3114 3368
3115 3369 /* Make sure we know which port we need to write */
3116 3370 hw->mac.ops.set_lan_id(hw);
3117 3371 /* Apply the port offset to the address offset */
3118 3372 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3119 3373 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3120 3374
3121 3375 for (i = 0; i < 3; i++) {
3122 3376 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3123 3377 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3124 3378 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3125 3379 san_mac_offset++;
3126 3380 }
3127 3381
3128 3382 san_mac_addr_out:
3129 3383 return status;
3130 3384 }
3131 3385
3132 3386 /**
3133 3387 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3134 3388 * @hw: pointer to hardware structure
3135 3389 *
3136 3390 * Read PCIe configuration space, and get the MSI-X vector count from
3137 3391 * the capabilities table.
3138 3392 **/
3139 3393 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3140 3394 {
3141 3395 u16 msix_count = 1;
3142 3396 u16 max_msix_count;
3143 3397 u16 pcie_offset;
3144 3398
3145 3399 switch (hw->mac.type) {
3146 3400 case ixgbe_mac_82598EB:
3147 3401 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3148 3402 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3149 3403 break;
3150 3404 case ixgbe_mac_82599EB:
3151 3405 case ixgbe_mac_X540:
3152 3406 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3153 3407 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3154 3408 break;
3155 3409 default:
3156 3410 return msix_count;
3157 3411 }
3158 3412
3159 3413 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3160 3414 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3161 3415 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3162 3416
3163 3417 /* MSI-X count is zero-based in HW */
3164 3418 msix_count++;
3165 3419
3166 3420 if (msix_count > max_msix_count)
3167 3421 msix_count = max_msix_count;
3168 3422
3169 3423 return msix_count;
3170 3424 }
3171 3425
3172 3426 /**
3173 3427 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3174 3428 * @hw: pointer to hardware structure
3175 3429 * @addr: Address to put into receive address register
3176 3430 * @vmdq: VMDq pool to assign
3177 3431 *
3178 3432 * Puts an ethernet address into a receive address register, or
3179 3433  * finds the rar that it is already in; adds to the pool list
3180 3434 **/
3181 3435 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3182 3436 {
3183 3437 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3184 3438 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3185 3439 u32 rar;
3186 3440 u32 rar_low, rar_high;
3187 3441 u32 addr_low, addr_high;
3188 3442
3189 3443 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3190 3444
3191 3445 /* swap bytes for HW little endian */
3192 3446 addr_low = addr[0] | (addr[1] << 8)
3193 3447 | (addr[2] << 16)
3194 3448 | (addr[3] << 24);
3195 3449 addr_high = addr[4] | (addr[5] << 8);
3196 3450
3197 3451 /*
3198 3452 * Either find the mac_id in rar or find the first empty space.
3199 3453 * rar_highwater points to just after the highest currently used
3200 3454 * rar in order to shorten the search. It grows when we add a new
3201 3455 * rar to the top.
3202 3456 */
3203 3457 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3204 3458 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3205 3459
3206 3460 if (((IXGBE_RAH_AV & rar_high) == 0)
3207 3461 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3208 3462 first_empty_rar = rar;
3209 3463 } else if ((rar_high & 0xFFFF) == addr_high) {
3210 3464 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3211 3465 if (rar_low == addr_low)
3212 3466 break; /* found it already in the rars */
3213 3467 }
3214 3468 }
3215 3469
3216 3470 if (rar < hw->mac.rar_highwater) {
3217 3471 /* already there so just add to the pool bits */
3218 3472 (void) ixgbe_set_vmdq(hw, rar, vmdq);
3219 3473 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3220 3474 /* stick it into first empty RAR slot we found */
3221 3475 rar = first_empty_rar;
3222 3476 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3223 3477 } else if (rar == hw->mac.rar_highwater) {
3224 3478 /* add it to the top of the list and inc the highwater mark */
3225 3479 (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3226 3480 hw->mac.rar_highwater++;
3227 3481 } else if (rar >= hw->mac.num_rar_entries) {
3228 3482 return IXGBE_ERR_INVALID_MAC_ADDR;
3229 3483 }
3230 3484
3231 3485 /*
3232 3486 * If we found rar[0], make sure the default pool bit (we use pool 0)
3233 3487 * remains cleared to be sure default pool packets will get delivered
3234 3488 */
3235 3489 if (rar == 0)
3236 3490 (void) ixgbe_clear_vmdq(hw, rar, 0);
3237 3491
3238 3492 return rar;
3239 3493 }
3240 3494
3241 3495 /**
3242 3496 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3243 3497 * @hw: pointer to hardware struct
3244 3498 * @rar: receive address register index to disassociate
3245 3499 * @vmdq: VMDq pool index to remove from the rar
3246 3500 **/
3247 3501 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3248 3502 {
3249 3503 u32 mpsar_lo, mpsar_hi;
3250 3504 u32 rar_entries = hw->mac.num_rar_entries;
3251 3505
3252 3506 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3253 3507
3254 3508 /* Make sure we are using a valid rar index range */
3255 3509 if (rar >= rar_entries) {
3256 3510 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3257 3511 return IXGBE_ERR_INVALID_ARGUMENT;
3258 3512 }
3259 3513
3260 3514 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3261 3515 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3262 3516
3263 3517 if (!mpsar_lo && !mpsar_hi)
3264 3518 goto done;
3265 3519
3266 3520 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3267 3521 if (mpsar_lo) {
3268 3522 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3269 3523 mpsar_lo = 0;
3270 3524 }
3271 3525 if (mpsar_hi) {
3272 3526 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3273 3527 mpsar_hi = 0;
3274 3528 }
3275 3529 } else if (vmdq < 32) {
3276 3530 mpsar_lo &= ~(1 << vmdq);
3277 3531 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3278 3532 } else {
3279 3533 mpsar_hi &= ~(1 << (vmdq - 32));
3280 3534 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3281 3535 }
3282 3536
3283 3537 /* was that the last pool using this rar? */
3284 3538 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3285 3539 hw->mac.ops.clear_rar(hw, rar);
3286 3540 done:
3287 3541 return IXGBE_SUCCESS;
3288 3542 }
3289 3543
3290 3544 /**
3291 3545 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3292 3546 * @hw: pointer to hardware struct
3293 3547 * @rar: receive address register index to associate with a VMDq index
3294 3548 * @vmdq: VMDq pool index
3295 3549 **/
3296 3550 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3297 3551 {
3298 3552 u32 mpsar;
3299 3553 u32 rar_entries = hw->mac.num_rar_entries;
3300 3554
3301 3555 DEBUGFUNC("ixgbe_set_vmdq_generic");
3302 3556
3303 3557 /* Make sure we are using a valid rar index range */
3304 3558 if (rar >= rar_entries) {
3305 3559 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3306 3560 return IXGBE_ERR_INVALID_ARGUMENT;
3307 3561 }
3308 3562
3309 3563 if (vmdq < 32) {
3310 3564 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3311 3565 mpsar |= 1 << vmdq;
3312 3566 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3313 3567 } else {
3314 3568 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3315 3569 mpsar |= 1 << (vmdq - 32);
3316 3570 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3317 3571 }
3318 3572 return IXGBE_SUCCESS;
3319 3573 }
3320 3574
3321 3575 /**
3322 3576 * This function should only be involved in the IOV mode.
3323 3577 * In IOV mode, Default pool is next pool after the number of
3324 3578 * VFs advertized and not 0.
3325 3579 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3326 3580 *
3327 3581 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3328 3582 * @hw: pointer to hardware struct
3329 3583 * @vmdq: VMDq pool index
3330 3584 **/
3331 3585 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3332 3586 {
3333 3587 u32 rar = hw->mac.san_mac_rar_index;
3334 3588
3335 3589 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3336 3590
3337 3591 if (vmdq < 32) {
3338 3592 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3339 3593 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3340 3594 } else {
3341 3595 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3342 3596 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3343 3597 }
3344 3598
3345 3599 return IXGBE_SUCCESS;
3346 3600 }
3347 3601
3348 3602 /**
3349 3603 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3350 3604 * @hw: pointer to hardware structure
3351 3605 **/
3352 3606 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3353 3607 {
3354 3608 int i;
3355 3609
3356 3610 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3357 3611 DEBUGOUT(" Clearing UTA\n");
3358 3612
3359 3613 for (i = 0; i < 128; i++)
3360 3614 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3361 3615
3362 3616 return IXGBE_SUCCESS;
3363 3617 }
3364 3618
3365 3619 /**
3366 3620 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3367 3621 * @hw: pointer to hardware structure
3368 3622 * @vlan: VLAN id to write to VLAN filter
3369 3623 *
3370 3624 * return the VLVF index where this VLAN id should be placed
3371 3625 *
3372 3626 **/
3373 3627 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3374 3628 {
3375 3629 u32 bits = 0;
3376 3630 u32 first_empty_slot = 0;
3377 3631 s32 regindex;
3378 3632
3379 3633 /* short cut the special case */
3380 3634 if (vlan == 0)
3381 3635 return 0;
3382 3636
3383 3637 /*
3384 3638 * Search for the vlan id in the VLVF entries. Save off the first empty
3385 3639 * slot found along the way
3386 3640 */
3387 3641 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3388 3642 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3389 3643 if (!bits && !(first_empty_slot))
3390 3644 first_empty_slot = regindex;
3391 3645 else if ((bits & 0x0FFF) == vlan)
3392 3646 break;
3393 3647 }
3394 3648
3395 3649 /*
3396 3650 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3397 3651 * in the VLVF. Else use the first empty VLVF register for this
3398 3652 * vlan id.
3399 3653 */
3400 3654 if (regindex >= IXGBE_VLVF_ENTRIES) {
3401 3655 if (first_empty_slot)
3402 3656 regindex = first_empty_slot;
3403 3657 else {
3404 3658 DEBUGOUT("No space in VLVF.\n");
3405 3659 regindex = IXGBE_ERR_NO_SPACE;
3406 3660 }
3407 3661 }
3408 3662
3409 3663 return regindex;
3410 3664 }
3411 3665
3412 3666 /**
3413 3667 * ixgbe_set_vfta_generic - Set VLAN filter table
3414 3668 * @hw: pointer to hardware structure
3415 3669 * @vlan: VLAN id to write to VLAN filter
3416 3670 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3417 3671 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3418 3672 *
3419 3673 * Turn on/off specified VLAN in the VLAN filter table.
3420 3674 **/
3421 3675 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3422 3676 bool vlan_on)
3423 3677 {
3424 3678 s32 regindex;
3425 3679 u32 bitindex;
3426 3680 u32 vfta;
3427 3681 u32 targetbit;
3428 3682 s32 ret_val = IXGBE_SUCCESS;
3429 3683 bool vfta_changed = FALSE;
3430 3684
3431 3685 DEBUGFUNC("ixgbe_set_vfta_generic");
3432 3686
3433 3687 if (vlan > 4095)
3434 3688 return IXGBE_ERR_PARAM;
3435 3689
3436 3690 /*
3437 3691 * this is a 2 part operation - first the VFTA, then the
3438 3692 * VLVF and VLVFB if VT Mode is set
3439 3693 * We don't write the VFTA until we know the VLVF part succeeded.
3440 3694 */
3441 3695
3442 3696 /* Part 1
3443 3697 * The VFTA is a bitstring made up of 128 32-bit registers
3444 3698 * that enable the particular VLAN id, much like the MTA:
3445 3699 * bits[11-5]: which register
3446 3700 * bits[4-0]: which bit in the register
3447 3701 */
3448 3702 regindex = (vlan >> 5) & 0x7F;
3449 3703 bitindex = vlan & 0x1F;
3450 3704 targetbit = (1 << bitindex);
3451 3705 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3452 3706
3453 3707 if (vlan_on) {
3454 3708 if (!(vfta & targetbit)) {
3455 3709 vfta |= targetbit;
3456 3710 vfta_changed = TRUE;
3457 3711 }
3458 3712 } else {
3459 3713 if ((vfta & targetbit)) {
3460 3714 vfta &= ~targetbit;
3461 3715 vfta_changed = TRUE;
3462 3716 }
3463 3717 }
3464 3718
3465 3719 /* Part 2
3466 3720 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3467 3721 */
3468 3722 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3469 3723 &vfta_changed);
3470 3724 if (ret_val != IXGBE_SUCCESS)
3471 3725 return ret_val;
3472 3726
3473 3727 if (vfta_changed)
3474 3728 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3475 3729
3476 3730 return IXGBE_SUCCESS;
3477 3731 }
3478 3732
/**
 *  ixgbe_set_vlvf_generic - Set VLAN Pool Filter
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
 *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
 *                 should be changed; may be cleared by this function when
 *                 other pools still reference the VLAN (may be NULL)
 *
 *  Turn on/off specified bit in VLVF table.
 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN ids are only 12 bits wide. */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		/* Find the entry holding this vlan id, or a free one. */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		/*
		 * Each VLVF entry owns a pair of 32-bit VLVFB pool-enable
		 * registers (64 pools total); vind selects the bit.
		 */
		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				/* fold in the other half so "bits" reflects
				 * every pool still using this entry */
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				/* likewise fold in the low half */
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers for
		 * this VLAN ID, other pools/VFs are still using it.  In
		 * that case keep the VLVF entry valid, and if the caller
		 * asked to clear the VFTA bit, veto it via *vfta_changed —
		 * the VFTA bit must stay set until the last user of this
		 * VLAN ID has been cleared ("bits" reaching zero).
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3586 3840
3587 3841 /**
3588 3842 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3589 3843 * @hw: pointer to hardware structure
3590 3844 *
3591 3845 * Clears the VLAN filer table, and the VMDq index associated with the filter
3592 3846 **/
3593 3847 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3594 3848 {
3595 3849 u32 offset;
3596 3850
3597 3851 DEBUGFUNC("ixgbe_clear_vfta_generic");
3598 3852
3599 3853 for (offset = 0; offset < hw->mac.vft_size; offset++)
3600 3854 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3601 3855
3602 3856 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3603 3857 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3604 3858 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3605 3859 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3606 3860 }
3607 3861
3608 3862 return IXGBE_SUCCESS;
3609 3863 }
3610 3864
3611 3865 /**
3612 3866 * ixgbe_check_mac_link_generic - Determine link and speed status
3613 3867 * @hw: pointer to hardware structure
3614 3868 * @speed: pointer to link speed
3615 3869 * @link_up: TRUE when link is up
3616 3870 * @link_up_wait_to_complete: bool used to wait for link up or not
3617 3871 *
3618 3872 * Reads the links register to determine if link is up and the current speed
3619 3873 **/
3620 3874 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3621 3875 bool *link_up, bool link_up_wait_to_complete)
3622 3876 {
3623 3877 u32 links_reg, links_orig;
3624 3878 u32 i;
3625 3879
3626 3880 DEBUGFUNC("ixgbe_check_mac_link_generic");
3627 3881
3628 3882 /* clear the old state */
3629 3883 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3630 3884
3631 3885 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3632 3886
3633 3887 if (links_orig != links_reg) {
3634 3888 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3635 3889 links_orig, links_reg);
3636 3890 }
3637 3891
3638 3892 if (link_up_wait_to_complete) {
3639 3893 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3640 3894 if (links_reg & IXGBE_LINKS_UP) {
3641 3895 *link_up = TRUE;
3642 3896 break;
3643 3897 } else {
3644 3898 *link_up = FALSE;
3645 3899 }
3646 3900 msec_delay(100);
3647 3901 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3648 3902 }
3649 3903 } else {
3650 3904 if (links_reg & IXGBE_LINKS_UP)
3651 3905 *link_up = TRUE;
3652 3906 else
3653 3907 *link_up = FALSE;
3654 3908 }
3655 3909
3656 3910 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3657 3911 IXGBE_LINKS_SPEED_10G_82599)
3658 3912 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3659 3913 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3660 3914 IXGBE_LINKS_SPEED_1G_82599)
3661 3915 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3662 3916 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3663 3917 IXGBE_LINKS_SPEED_100_82599)
3664 3918 *speed = IXGBE_LINK_SPEED_100_FULL;
3665 3919 else
3666 3920 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3667 3921
3668 3922 return IXGBE_SUCCESS;
3669 3923 }
3670 3924
3671 3925 /**
3672 3926 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3673 3927 * the EEPROM
3674 3928 * @hw: pointer to hardware structure
3675 3929 * @wwnn_prefix: the alternative WWNN prefix
3676 3930 * @wwpn_prefix: the alternative WWPN prefix
3677 3931 *
3678 3932 * This function will read the EEPROM from the alternative SAN MAC address
3679 3933 * block to check the support for the alternative WWNN/WWPN prefix support.
3680 3934 **/
3681 3935 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3682 3936 u16 *wwpn_prefix)
3683 3937 {
3684 3938 u16 offset, caps;
3685 3939 u16 alt_san_mac_blk_offset;
3686 3940
3687 3941 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3688 3942
3689 3943 /* clear output first */
3690 3944 *wwnn_prefix = 0xFFFF;
3691 3945 *wwpn_prefix = 0xFFFF;
3692 3946
3693 3947 /* check if alternative SAN MAC is supported */
3694 3948 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3695 3949 &alt_san_mac_blk_offset);
3696 3950
3697 3951 if ((alt_san_mac_blk_offset == 0) ||
3698 3952 (alt_san_mac_blk_offset == 0xFFFF))
3699 3953 goto wwn_prefix_out;
3700 3954
3701 3955 /* check capability in alternative san mac address block */
3702 3956 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3703 3957 hw->eeprom.ops.read(hw, offset, &caps);
3704 3958 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3705 3959 goto wwn_prefix_out;
3706 3960
3707 3961 /* get the corresponding prefix for WWNN/WWPN */
3708 3962 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3709 3963 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3710 3964
3711 3965 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3712 3966 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3713 3967
3714 3968 wwn_prefix_out:
3715 3969 return IXGBE_SUCCESS;
3716 3970 }
3717 3971
3718 3972 /**
3719 3973 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3720 3974 * @hw: pointer to hardware structure
3721 3975 * @bs: the fcoe boot status
3722 3976 *
3723 3977 * This function will read the FCOE boot status from the iSCSI FCOE block
3724 3978 **/
3725 3979 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3726 3980 {
3727 3981 u16 offset, caps, flags;
3728 3982 s32 status;
3729 3983
3730 3984 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3731 3985
3732 3986 /* clear output first */
3733 3987 *bs = ixgbe_fcoe_bootstatus_unavailable;
3734 3988
3735 3989 /* check if FCOE IBA block is present */
3736 3990 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3737 3991 status = hw->eeprom.ops.read(hw, offset, &caps);
3738 3992 if (status != IXGBE_SUCCESS)
3739 3993 goto out;
3740 3994
3741 3995 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3742 3996 goto out;
3743 3997
3744 3998 /* check if iSCSI FCOE block is populated */
3745 3999 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3746 4000 if (status != IXGBE_SUCCESS)
3747 4001 goto out;
3748 4002
3749 4003 if ((offset == 0) || (offset == 0xFFFF))
3750 4004 goto out;
3751 4005
3752 4006 /* read fcoe flags in iSCSI FCOE block */
3753 4007 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3754 4008 status = hw->eeprom.ops.read(hw, offset, &flags);
3755 4009 if (status != IXGBE_SUCCESS)
3756 4010 goto out;
3757 4011
3758 4012 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3759 4013 *bs = ixgbe_fcoe_bootstatus_enabled;
3760 4014 else
3761 4015 *bs = ixgbe_fcoe_bootstatus_disabled;
3762 4016
3763 4017 out:
3764 4018 return status;
3765 4019 }
3766 4020
/**
 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 *  @hw: pointer to hardware structure
 *  @enable: enable or disable switch for anti-spoofing
 *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
 *
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	/* PFVFSPOOF register index and bit position of the PF's own pool */
	int pf_target_reg = pf >> 3;
	int pf_target_shift = pf % 8;
	u32 pfvfspoof = 0;

	/* Nothing to do on 82598 — this function is a no-op there. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 * Registers wholly below the PF's pool get the full mask.
	 */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs. Do not set the bits assigned to the PF:
	 * in the PF's own register (j == pf_target_reg after the loop
	 * above) keep only bits strictly below the PF's bit position.
	 */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
3808 4062
/**
 *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 *  @hw: pointer to hardware structure
 *  @enable: enable or disable switch for VLAN anti-spoofing
 *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 *
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	/* 8 VLAN anti-spoof enables per PFVFSPOOF register; the VLAN
	 * bits sit above the MAC bits at IXGBE_SPOOF_VLANAS_SHIFT. */
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	/* Nothing to do on 82598 — this function is a no-op there. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* Read-modify-write only this VF's enable bit. */
	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
3832 4086
/**
 *  ixgbe_get_device_caps_generic - Get additional device capabilities
 *  @hw: pointer to hardware structure
 *  @device_caps: the EEPROM word with the extra device capabilities
 *
 *  This function will read the EEPROM location for the device capabilities,
 *  and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	/* NOTE(review): the read status is ignored; if the EEPROM read
	 * fails, *device_caps may be left unmodified — confirm callers
	 * initialize it or tolerate that. */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}
3849 4103
3850 4104 /**
3851 4105 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3852 4106 * @hw: pointer to hardware structure
3853 4107 *
3854 4108 **/
3855 4109 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3856 4110 {
3857 4111 u32 regval;
3858 4112 u32 i;
3859 4113
3860 4114 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3861 4115
3862 4116 /* Enable relaxed ordering */
3863 4117 for (i = 0; i < hw->mac.max_tx_queues; i++) {
3864 4118 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3865 4119 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3866 4120 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3867 4121 }
3868 4122
3869 4123 for (i = 0; i < hw->mac.max_rx_queues; i++) {
3870 4124 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3871 4125 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3872 4126 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3873 4127 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3874 4128 }
|
↓ open down ↓ |
848 lines elided |
↑ open up ↑ |
3875 4129
3876 4130 }
3877 4131
3878 4132 /**
3879 4133 * ixgbe_calculate_checksum - Calculate checksum for buffer
3880 4134 * @buffer: pointer to EEPROM
3881 4135 * @length: size of EEPROM to calculate a checksum for
3882 4136 * Calculates the checksum for some buffer on a specified length. The
3883 4137 * checksum calculated is returned.
3884 4138 **/
3885 -static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4139 +u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3886 4140 {
3887 4141 u32 i;
3888 4142 u8 sum = 0;
3889 4143
3890 4144 DEBUGFUNC("ixgbe_calculate_checksum");
3891 4145
3892 4146 if (!buffer)
3893 4147 return 0;
3894 4148
3895 4149 for (i = 0; i < length; i++)
3896 4150 sum += buffer[i];
3897 4151
3898 4152 return (u8) (0 - sum);
3899 4153 }
3900 4154
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
/**
 *  ixgbe_host_interface_command - Issue command to manageability block
 *  @hw: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *
 *  Communicates with the manageability block. On success return IXGBE_SUCCESS
 *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Commands must be non-empty, dword-aligned and fit the mailbox. */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll (1 ms steps) until firmware clears the command bit. */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command successful completion: fail on timeout, or when
	 * firmware did not set the status-valid bit. */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		buffer[bi] = IXGBE_LE32_TO_CPUS(buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	/* The caller's buffer must be able to hold the reply payload. */
	if (length < (buf_len + hdr_size)) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off)*/
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		buffer[bi] = IXGBE_LE32_TO_CPUS(buffer[bi]);
	}

out:
	return ret_val;
}
3998 4252
/**
 *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 *  @hw: pointer to the HW structure
 *  @maj: driver version major number
 *  @min: driver version minor number
 *  @build: driver version build number
 *  @sub: driver version sub build number
 *
 *  Sends driver version number to firmware through the manageability
 *  block. On success return IXGBE_SUCCESS
 *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");

	/* Serialize access to the manageability interface with firmware. */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
	    != IXGBE_SUCCESS) {
		ret_val = IXGBE_ERR_SWFW_SYNC;
		goto out;
	}

	/* Build the DRIVER_INFO command block. */
	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* Checksum is computed with the checksum field itself zeroed. */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* NOTE(review): pad/pad2 are zeroed after the checksum is taken;
	 * presumably they lie outside the checksummed range — confirm
	 * against struct ixgbe_hic_drv_info's layout. */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	/* Retry the mailbox command a bounded number of times. */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		/* LINTED */
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd));
		if (ret_val != IXGBE_SUCCESS)
			continue;

		/* Transport succeeded: translate firmware's reply status. */
		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
out:
	return ret_val;
}
4061 4315
/**
 *  ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 *  @hw: pointer to hardware structure
 *  @num_pb: number of packet buffers to allocate
 *  @headroom: reserve n KB of headroom
 *  @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	/* Guard against division by zero below. */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
		/* FALLTHRU */
	case PBA_STRATEGY_EQUAL:
		/* Buffers i..num_pb-1 split whatever space remains. */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4121 4375
/**
 *  ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 *  @hw: pointer to the hardware structure
 *
 *  The 82599 and x540 MACs can experience issues if TX work is still pending
 *  when a reset occurs. This function prevents this by flushing the PCIe
 *  buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
4162 4416
|
↓ open down ↓ |
240 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX