Print this page
3014 Intel X540 Support
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_common.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_common.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2010, Intel Corporation
3 + Copyright (c) 2001-2012, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 -/*$FreeBSD$*/
33 +/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_common.h"
36 36 #include "ixgbe_phy.h"
37 37 #include "ixgbe_api.h"
38 38
39 39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 - u16 count);
45 + u16 count);
46 46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50 50
51 51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 - u16 *san_mac_offset);
54 -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
55 -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
56 -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
57 -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
58 -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
59 - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
53 + u16 *san_mac_offset);
54 +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 + u16 words, u16 *data);
56 +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 + u16 words, u16 *data);
58 +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 + u16 offset);
60 60
61 -s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
62 -
63 61 /**
64 62 * ixgbe_init_ops_generic - Inits function ptrs
65 63 * @hw: pointer to the hardware structure
66 64 *
67 65 * Initialize the function pointers.
68 66 **/
69 67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 68 {
71 69 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
72 70 struct ixgbe_mac_info *mac = &hw->mac;
73 71 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
74 72
75 73 DEBUGFUNC("ixgbe_init_ops_generic");
76 74
77 75 /* EEPROM */
78 76 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
79 77 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
80 - if (eec & (1 << 8))
78 + if (eec & IXGBE_EEC_PRES) {
81 79 eeprom->ops.read = &ixgbe_read_eerd_generic;
82 - else
80 + eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 + } else {
83 82 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 + eeprom->ops.read_buffer =
84 + &ixgbe_read_eeprom_buffer_bit_bang_generic;
85 + }
84 86 eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 + eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
85 88 eeprom->ops.validate_checksum =
86 - &ixgbe_validate_eeprom_checksum_generic;
89 + &ixgbe_validate_eeprom_checksum_generic;
87 90 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
88 91 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
89 92
90 93 /* MAC */
91 94 mac->ops.init_hw = &ixgbe_init_hw_generic;
92 95 mac->ops.reset_hw = NULL;
93 96 mac->ops.start_hw = &ixgbe_start_hw_generic;
94 97 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
95 98 mac->ops.get_media_type = NULL;
96 99 mac->ops.get_supported_physical_layer = NULL;
97 100 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
98 101 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
99 102 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
100 103 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
101 104 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
102 105 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
103 106 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
104 107
105 108 /* LEDs */
106 109 mac->ops.led_on = &ixgbe_led_on_generic;
107 110 mac->ops.led_off = &ixgbe_led_off_generic;
108 111 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
109 112 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
110 113
111 114 /* RAR, Multicast, VLAN */
112 115 mac->ops.set_rar = &ixgbe_set_rar_generic;
113 116 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
114 117 mac->ops.insert_mac_addr = NULL;
115 118 mac->ops.set_vmdq = NULL;
116 119 mac->ops.clear_vmdq = NULL;
117 120 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
118 121 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
119 122 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
120 123 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
121 124 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
122 125 mac->ops.clear_vfta = NULL;
123 126 mac->ops.set_vfta = NULL;
127 + mac->ops.set_vlvf = NULL;
124 128 mac->ops.init_uta_tables = NULL;
125 129
126 130 /* Flow Control */
127 131 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
128 132
129 133 /* Link */
130 134 mac->ops.get_link_capabilities = NULL;
131 135 mac->ops.setup_link = NULL;
132 136 mac->ops.check_link = NULL;
133 137
134 138 return IXGBE_SUCCESS;
135 139 }
136 140
137 141 /**
142 + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143 + * control
144 + * @hw: pointer to hardware structure
145 + *
146 + * There are several phys that do not support autoneg flow control. This
 147 + * function checks the device id to see if the associated phy supports
148 + * autoneg flow control.
149 + **/
150 +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 +{
152 +
153 + DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154 +
155 + switch (hw->device_id) {
156 + case IXGBE_DEV_ID_X540T:
157 + case IXGBE_DEV_ID_X540T1:
158 + return IXGBE_SUCCESS;
159 + case IXGBE_DEV_ID_82599_T3_LOM:
160 + return IXGBE_SUCCESS;
161 + default:
162 + return IXGBE_ERR_FC_NOT_SUPPORTED;
163 + }
164 +}
165 +
166 +/**
167 + * ixgbe_setup_fc - Set up flow control
168 + * @hw: pointer to hardware structure
169 + *
170 + * Called at init time to set up flow control.
171 + **/
172 +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
173 +{
174 + s32 ret_val = IXGBE_SUCCESS;
175 + u32 reg = 0, reg_bp = 0;
176 + u16 reg_cu = 0;
177 +
178 + DEBUGFUNC("ixgbe_setup_fc");
179 +
180 + /*
181 + * Validate the requested mode. Strict IEEE mode does not allow
182 + * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
183 + */
184 + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 + DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
186 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
187 + goto out;
188 + }
189 +
190 + /*
191 + * 10gig parts do not have a word in the EEPROM to determine the
192 + * default flow control setting, so we explicitly set it to full.
193 + */
194 + if (hw->fc.requested_mode == ixgbe_fc_default)
195 + hw->fc.requested_mode = ixgbe_fc_full;
196 +
197 + /*
198 + * Set up the 1G and 10G flow control advertisement registers so the
199 + * HW will be able to do fc autoneg once the cable is plugged in. If
200 + * we link at 10G, the 1G advertisement is harmless and vice versa.
201 + */
202 + switch (hw->phy.media_type) {
203 + case ixgbe_media_type_fiber:
204 + case ixgbe_media_type_backplane:
205 + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
207 + break;
208 + case ixgbe_media_type_copper:
209 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
 210 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
211 + break;
212 + default:
213 + break;
214 + }
215 +
216 + /*
217 + * The possible values of fc.requested_mode are:
218 + * 0: Flow control is completely disabled
219 + * 1: Rx flow control is enabled (we can receive pause frames,
220 + * but not send pause frames).
221 + * 2: Tx flow control is enabled (we can send pause frames but
222 + * we do not support receiving pause frames).
223 + * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 + * other: Invalid.
225 + */
226 + switch (hw->fc.requested_mode) {
227 + case ixgbe_fc_none:
228 + /* Flow control completely disabled by software override. */
229 + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 + if (hw->phy.media_type == ixgbe_media_type_backplane)
231 + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 + IXGBE_AUTOC_ASM_PAUSE);
233 + else if (hw->phy.media_type == ixgbe_media_type_copper)
234 + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 + break;
236 + case ixgbe_fc_tx_pause:
237 + /*
238 + * Tx Flow control is enabled, and Rx Flow control is
239 + * disabled by software override.
240 + */
241 + reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 + if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 + reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 + } else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 + reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 + reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 + }
250 + break;
251 + case ixgbe_fc_rx_pause:
252 + /*
253 + * Rx Flow control is enabled and Tx Flow control is
254 + * disabled by software override. Since there really
255 + * isn't a way to advertise that we are capable of RX
256 + * Pause ONLY, we will advertise that we support both
257 + * symmetric and asymmetric Rx PAUSE, as such we fall
258 + * through to the fc_full statement. Later, we will
259 + * disable the adapter's ability to send PAUSE frames.
260 + */
261 + case ixgbe_fc_full:
262 + /* Flow control (both Rx and Tx) is enabled by SW override. */
263 + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 + if (hw->phy.media_type == ixgbe_media_type_backplane)
265 + reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 + IXGBE_AUTOC_ASM_PAUSE;
267 + else if (hw->phy.media_type == ixgbe_media_type_copper)
268 + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 + break;
270 + default:
271 + DEBUGOUT("Flow control param set incorrectly\n");
272 + ret_val = IXGBE_ERR_CONFIG;
273 + goto out;
274 + }
275 +
276 + if (hw->mac.type != ixgbe_mac_X540) {
277 + /*
278 + * Enable auto-negotiation between the MAC & PHY;
279 + * the MAC will advertise clause 37 flow control.
280 + */
281 + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283 +
284 + /* Disable AN timeout */
285 + if (hw->fc.strict_ieee)
286 + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287 +
288 + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
289 + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 + }
291 +
292 + /*
293 + * AUTOC restart handles negotiation of 1G and 10G on backplane
294 + * and copper. There is no need to set the PCS1GCTL register.
295 + *
296 + */
297 + if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 + reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
300 + } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
301 + (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
302 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
303 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
304 + }
305 +
306 + DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
307 +out:
308 + return ret_val;
309 +}
310 +
311 +/**
138 312 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
139 313 * @hw: pointer to hardware structure
140 314 *
141 315 * Starts the hardware by filling the bus info structure and media type, clears
142 316 * all on chip counters, initializes receive address registers, multicast
143 317 * table, VLAN filter table, calls routine to set up link and flow control
144 318 * settings, and leaves transmit and receive units disabled and uninitialized
145 319 **/
146 320 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
147 321 {
322 + s32 ret_val;
148 323 u32 ctrl_ext;
149 324
150 325 DEBUGFUNC("ixgbe_start_hw_generic");
151 326
152 327 /* Set the media type */
153 328 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
154 329
155 330 /* PHY ops initialization must be done in reset_hw() */
156 331
157 332 /* Clear the VLAN filter table */
158 333 hw->mac.ops.clear_vfta(hw);
159 334
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
160 335 /* Clear statistics registers */
161 336 hw->mac.ops.clear_hw_cntrs(hw);
162 337
163 338 /* Set No Snoop Disable */
164 339 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
165 340 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
166 341 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
167 342 IXGBE_WRITE_FLUSH(hw);
168 343
169 344 /* Setup flow control */
170 - (void) ixgbe_setup_fc(hw, 0);
345 + ret_val = ixgbe_setup_fc(hw);
346 + if (ret_val != IXGBE_SUCCESS)
347 + goto out;
171 348
172 349 /* Clear adapter stopped flag */
173 350 hw->adapter_stopped = FALSE;
174 351
175 - return IXGBE_SUCCESS;
352 +out:
353 + return ret_val;
176 354 }
177 355
178 356 /**
179 357 * ixgbe_start_hw_gen2 - Init sequence for common device family
180 358 * @hw: pointer to hw structure
181 359 *
182 360 * Performs the init sequence common to the second generation
183 361 * of 10 GbE devices.
184 362 * Devices in the second generation:
185 363 * 82599
186 364 * X540
187 365 **/
188 366 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
189 367 {
190 368 u32 i;
191 369 u32 regval;
192 370
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
193 371 /* Clear the rate limiters */
194 372 for (i = 0; i < hw->mac.max_tx_queues; i++) {
195 373 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
196 374 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
197 375 }
198 376 IXGBE_WRITE_FLUSH(hw);
199 377
200 378 /* Disable relaxed ordering */
201 379 for (i = 0; i < hw->mac.max_tx_queues; i++) {
202 380 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
203 - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
381 + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
204 382 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
205 383 }
206 384
207 385 for (i = 0; i < hw->mac.max_rx_queues; i++) {
208 386 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
209 - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
210 - IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
387 + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
388 + IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
211 389 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
212 390 }
213 391
214 392 return IXGBE_SUCCESS;
215 393 }
216 394
217 395 /**
218 396 * ixgbe_init_hw_generic - Generic hardware initialization
219 397 * @hw: pointer to hardware structure
220 398 *
221 399 * Initialize the hardware by resetting the hardware, filling the bus info
222 400 * structure and media type, clears all on chip counters, initializes receive
223 401 * address registers, multicast table, VLAN filter table, calls routine to set
224 402 * up link and flow control settings, and leaves transmit and receive units
225 403 * disabled and uninitialized
226 404 **/
227 405 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
228 406 {
229 407 s32 status;
230 408
231 409 DEBUGFUNC("ixgbe_init_hw_generic");
232 410
233 411 /* Reset the hardware */
234 412 status = hw->mac.ops.reset_hw(hw);
235 413
236 414 if (status == IXGBE_SUCCESS) {
237 415 /* Start the HW */
238 416 status = hw->mac.ops.start_hw(hw);
239 417 }
240 418
241 419 return status;
242 420 }
243 421
244 422 /**
245 423 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
246 424 * @hw: pointer to hardware structure
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
247 425 *
248 426 * Clears all hardware statistics counters by reading them from the hardware
249 427 * Statistics counters are clear on read.
250 428 **/
251 429 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
252 430 {
253 431 u16 i = 0;
254 432
255 433 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
256 434
257 - (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
258 - (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
259 - (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
260 - (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
435 + IXGBE_READ_REG(hw, IXGBE_CRCERRS);
436 + IXGBE_READ_REG(hw, IXGBE_ILLERRC);
437 + IXGBE_READ_REG(hw, IXGBE_ERRBC);
438 + IXGBE_READ_REG(hw, IXGBE_MSPDC);
261 439 for (i = 0; i < 8; i++)
262 - (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
440 + IXGBE_READ_REG(hw, IXGBE_MPC(i));
263 441
264 - (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
265 - (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
266 - (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
267 - (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
268 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
442 + IXGBE_READ_REG(hw, IXGBE_MLFC);
443 + IXGBE_READ_REG(hw, IXGBE_MRFC);
444 + IXGBE_READ_REG(hw, IXGBE_RLEC);
445 + IXGBE_READ_REG(hw, IXGBE_LXONTXC);
446 + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
269 447 if (hw->mac.type >= ixgbe_mac_82599EB) {
270 - (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
271 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
448 + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
449 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
272 450 } else {
273 - (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
274 - (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
451 + IXGBE_READ_REG(hw, IXGBE_LXONRXC);
452 + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
275 453 }
276 454
277 455 for (i = 0; i < 8; i++) {
278 - (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
279 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
456 + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
457 + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
280 458 if (hw->mac.type >= ixgbe_mac_82599EB) {
281 - (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
282 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
459 + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
460 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
283 461 } else {
284 - (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
285 - (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
462 + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
463 + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
286 464 }
287 465 }
288 466 if (hw->mac.type >= ixgbe_mac_82599EB)
289 467 for (i = 0; i < 8; i++)
290 - (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
291 - (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
292 - (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
293 - (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
294 - (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
295 - (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
296 - (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
297 - (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
298 - (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
299 - (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
300 - (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
301 - (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
302 - (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
303 - (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
304 - (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
305 - for (i = 0; i < 8; i++)
306 - (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
307 - (void) IXGBE_READ_REG(hw, IXGBE_RUC);
308 - (void) IXGBE_READ_REG(hw, IXGBE_RFC);
309 - (void) IXGBE_READ_REG(hw, IXGBE_ROC);
310 - (void) IXGBE_READ_REG(hw, IXGBE_RJC);
311 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
312 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
313 - (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
314 - (void) IXGBE_READ_REG(hw, IXGBE_TORL);
315 - (void) IXGBE_READ_REG(hw, IXGBE_TORH);
316 - (void) IXGBE_READ_REG(hw, IXGBE_TPR);
317 - (void) IXGBE_READ_REG(hw, IXGBE_TPT);
318 - (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
319 - (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
320 - (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
321 - (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
322 - (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
323 - (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
324 - (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
325 - (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
468 + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
469 + IXGBE_READ_REG(hw, IXGBE_PRC64);
470 + IXGBE_READ_REG(hw, IXGBE_PRC127);
471 + IXGBE_READ_REG(hw, IXGBE_PRC255);
472 + IXGBE_READ_REG(hw, IXGBE_PRC511);
473 + IXGBE_READ_REG(hw, IXGBE_PRC1023);
474 + IXGBE_READ_REG(hw, IXGBE_PRC1522);
475 + IXGBE_READ_REG(hw, IXGBE_GPRC);
476 + IXGBE_READ_REG(hw, IXGBE_BPRC);
477 + IXGBE_READ_REG(hw, IXGBE_MPRC);
478 + IXGBE_READ_REG(hw, IXGBE_GPTC);
479 + IXGBE_READ_REG(hw, IXGBE_GORCL);
480 + IXGBE_READ_REG(hw, IXGBE_GORCH);
481 + IXGBE_READ_REG(hw, IXGBE_GOTCL);
482 + IXGBE_READ_REG(hw, IXGBE_GOTCH);
483 + if (hw->mac.type == ixgbe_mac_82598EB)
484 + for (i = 0; i < 8; i++)
485 + IXGBE_READ_REG(hw, IXGBE_RNBC(i));
486 + IXGBE_READ_REG(hw, IXGBE_RUC);
487 + IXGBE_READ_REG(hw, IXGBE_RFC);
488 + IXGBE_READ_REG(hw, IXGBE_ROC);
489 + IXGBE_READ_REG(hw, IXGBE_RJC);
490 + IXGBE_READ_REG(hw, IXGBE_MNGPRC);
491 + IXGBE_READ_REG(hw, IXGBE_MNGPDC);
492 + IXGBE_READ_REG(hw, IXGBE_MNGPTC);
493 + IXGBE_READ_REG(hw, IXGBE_TORL);
494 + IXGBE_READ_REG(hw, IXGBE_TORH);
495 + IXGBE_READ_REG(hw, IXGBE_TPR);
496 + IXGBE_READ_REG(hw, IXGBE_TPT);
497 + IXGBE_READ_REG(hw, IXGBE_PTC64);
498 + IXGBE_READ_REG(hw, IXGBE_PTC127);
499 + IXGBE_READ_REG(hw, IXGBE_PTC255);
500 + IXGBE_READ_REG(hw, IXGBE_PTC511);
501 + IXGBE_READ_REG(hw, IXGBE_PTC1023);
502 + IXGBE_READ_REG(hw, IXGBE_PTC1522);
503 + IXGBE_READ_REG(hw, IXGBE_MPTC);
504 + IXGBE_READ_REG(hw, IXGBE_BPTC);
326 505 for (i = 0; i < 16; i++) {
327 - (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
328 - (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
506 + IXGBE_READ_REG(hw, IXGBE_QPRC(i));
507 + IXGBE_READ_REG(hw, IXGBE_QPTC(i));
329 508 if (hw->mac.type >= ixgbe_mac_82599EB) {
330 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
331 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
332 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
333 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
334 - (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
509 + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
510 + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
511 + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
512 + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
513 + IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
335 514 } else {
336 - (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
337 - (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
515 + IXGBE_READ_REG(hw, IXGBE_QBRC(i));
516 + IXGBE_READ_REG(hw, IXGBE_QBTC(i));
338 517 }
339 518 }
340 519
520 + if (hw->mac.type == ixgbe_mac_X540) {
521 + if (hw->phy.id == 0)
522 + ixgbe_identify_phy(hw);
523 + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
524 + IXGBE_MDIO_PCS_DEV_TYPE, &i);
525 + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
526 + IXGBE_MDIO_PCS_DEV_TYPE, &i);
527 + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
528 + IXGBE_MDIO_PCS_DEV_TYPE, &i);
529 + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
530 + IXGBE_MDIO_PCS_DEV_TYPE, &i);
531 + }
532 +
341 533 return IXGBE_SUCCESS;
342 534 }
343 535
344 536 /**
345 537 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
346 538 * @hw: pointer to hardware structure
347 539 * @pba_num: stores the part number string from the EEPROM
348 540 * @pba_num_size: part number string buffer length
349 541 *
350 542 * Reads the part number string from the EEPROM.
351 543 **/
352 544 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
353 - u32 pba_num_size)
545 + u32 pba_num_size)
354 546 {
355 547 s32 ret_val;
356 548 u16 data;
357 549 u16 pba_ptr;
358 550 u16 offset;
359 551 u16 length;
360 552
361 553 DEBUGFUNC("ixgbe_read_pba_string_generic");
362 554
363 555 if (pba_num == NULL) {
364 556 DEBUGOUT("PBA string buffer was null\n");
365 557 return IXGBE_ERR_INVALID_ARGUMENT;
366 558 }
367 559
368 560 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
369 561 if (ret_val) {
370 562 DEBUGOUT("NVM Read Error\n");
371 563 return ret_val;
372 564 }
373 565
374 566 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
375 567 if (ret_val) {
376 568 DEBUGOUT("NVM Read Error\n");
377 569 return ret_val;
378 570 }
379 571
380 572 /*
381 573 * if data is not ptr guard the PBA must be in legacy format which
382 574 * means pba_ptr is actually our second data word for the PBA number
383 575 * and we can decode it into an ascii string
384 576 */
385 577 if (data != IXGBE_PBANUM_PTR_GUARD) {
386 578 DEBUGOUT("NVM PBA number is not stored as string\n");
387 579
388 580 /* we will need 11 characters to store the PBA */
389 581 if (pba_num_size < 11) {
390 582 DEBUGOUT("PBA string buffer too small\n");
391 583 return IXGBE_ERR_NO_SPACE;
392 584 }
393 585
394 586 /* extract hex string from data and pba_ptr */
395 587 pba_num[0] = (data >> 12) & 0xF;
396 588 pba_num[1] = (data >> 8) & 0xF;
397 589 pba_num[2] = (data >> 4) & 0xF;
398 590 pba_num[3] = data & 0xF;
399 591 pba_num[4] = (pba_ptr >> 12) & 0xF;
400 592 pba_num[5] = (pba_ptr >> 8) & 0xF;
401 593 pba_num[6] = '-';
402 594 pba_num[7] = 0;
403 595 pba_num[8] = (pba_ptr >> 4) & 0xF;
404 596 pba_num[9] = pba_ptr & 0xF;
405 597
406 598 /* put a null character on the end of our string */
407 599 pba_num[10] = '\0';
408 600
409 601 /* switch all the data but the '-' to hex char */
410 602 for (offset = 0; offset < 10; offset++) {
411 603 if (pba_num[offset] < 0xA)
412 604 pba_num[offset] += '0';
413 605 else if (pba_num[offset] < 0x10)
414 606 pba_num[offset] += 'A' - 0xA;
415 607 }
416 608
417 609 return IXGBE_SUCCESS;
418 610 }
419 611
420 612 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
421 613 if (ret_val) {
422 614 DEBUGOUT("NVM Read Error\n");
423 615 return ret_val;
424 616 }
425 617
426 618 if (length == 0xFFFF || length == 0) {
427 619 DEBUGOUT("NVM PBA number section invalid length\n");
428 620 return IXGBE_ERR_PBA_SECTION;
429 621 }
430 622
431 623 /* check if pba_num buffer is big enough */
432 624 if (pba_num_size < (((u32)length * 2) - 1)) {
433 625 DEBUGOUT("PBA string buffer too small\n");
434 626 return IXGBE_ERR_NO_SPACE;
435 627 }
436 628
437 629 /* trim pba length from start of string */
438 630 pba_ptr++;
439 631 length--;
440 632
441 633 for (offset = 0; offset < length; offset++) {
442 634 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
443 635 if (ret_val) {
444 636 DEBUGOUT("NVM Read Error\n");
445 637 return ret_val;
|
↓ open down ↓ |
82 lines elided |
↑ open up ↑ |
446 638 }
447 639 pba_num[offset * 2] = (u8)(data >> 8);
448 640 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
449 641 }
450 642 pba_num[offset * 2] = '\0';
451 643
452 644 return IXGBE_SUCCESS;
453 645 }
454 646
455 647 /**
456 - * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
457 - * @hw: pointer to hardware structure
458 - * @pba_num_size: part number string buffer length
459 - *
460 - * Reads the part number length from the EEPROM.
461 - * Returns expected buffer size in pba_num_size
462 - **/
463 -s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
464 -{
465 - s32 ret_val;
466 - u16 data;
467 - u16 pba_ptr;
468 - u16 length;
469 -
470 - DEBUGFUNC("ixgbe_read_pba_length_generic");
471 -
472 - if (pba_num_size == NULL) {
473 - DEBUGOUT("PBA buffer size was null\n");
474 - return IXGBE_ERR_INVALID_ARGUMENT;
475 - }
476 -
477 - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
478 - if (ret_val) {
479 - DEBUGOUT("NVM Read Error\n");
480 - return ret_val;
481 - }
482 -
483 - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
484 - if (ret_val) {
485 - DEBUGOUT("NVM Read Error\n");
486 - return ret_val;
487 - }
488 -
489 - /* if data is not ptr guard the PBA must be in legacy format */
490 - if (data != IXGBE_PBANUM_PTR_GUARD) {
491 - *pba_num_size = 11;
492 - return IXGBE_SUCCESS;
493 - }
494 -
495 - ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
496 - if (ret_val) {
497 - DEBUGOUT("NVM Read Error\n");
498 - return ret_val;
499 - }
500 -
501 - if (length == 0xFFFF || length == 0) {
502 - DEBUGOUT("NVM PBA number section invalid length\n");
503 - return IXGBE_ERR_PBA_SECTION;
504 - }
505 -
506 - /*
507 - * Convert from length in u16 values to u8 chars, add 1 for NULL,
508 - * and subtract 2 because length field is included in length.
509 - */
510 - *pba_num_size = ((u32)length * 2) - 1;
511 -
512 - return IXGBE_SUCCESS;
513 -}
514 -
515 -/**
516 648 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
517 649 * @hw: pointer to hardware structure
518 650 * @pba_num: stores the part number from the EEPROM
519 651 *
520 652 * Reads the part number from the EEPROM.
521 653 **/
522 654 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
523 655 {
524 656 s32 ret_val;
525 657 u16 data;
526 658
527 659 DEBUGFUNC("ixgbe_read_pba_num_generic");
528 660
529 661 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
530 662 if (ret_val) {
531 663 DEBUGOUT("NVM Read Error\n");
532 664 return ret_val;
533 665 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
534 666 DEBUGOUT("NVM Not supported\n");
535 667 return IXGBE_NOT_IMPLEMENTED;
536 668 }
537 669 *pba_num = (u32)(data << 16);
538 670
539 671 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
540 672 if (ret_val) {
541 673 DEBUGOUT("NVM Read Error\n");
542 674 return ret_val;
543 675 }
544 676 *pba_num |= data;
545 677
546 678 return IXGBE_SUCCESS;
547 679 }
548 680
549 681 /**
550 682 * ixgbe_get_mac_addr_generic - Generic get MAC address
551 683 * @hw: pointer to hardware structure
552 684 * @mac_addr: Adapter MAC address
553 685 *
554 686 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
555 687 * A reset of the adapter must be performed prior to calling this function
556 688 * in order for the MAC address to have been loaded from the EEPROM into RAR0
557 689 **/
558 690 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
559 691 {
560 692 u32 rar_high;
561 693 u32 rar_low;
562 694 u16 i;
563 695
564 696 DEBUGFUNC("ixgbe_get_mac_addr_generic");
565 697
566 698 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
567 699 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
568 700
569 701 for (i = 0; i < 4; i++)
570 702 mac_addr[i] = (u8)(rar_low >> (i*8));
571 703
572 704 for (i = 0; i < 2; i++)
573 705 mac_addr[i+4] = (u8)(rar_high >> (i*8));
574 706
575 707 return IXGBE_SUCCESS;
576 708 }
577 709
578 710 /**
579 711 * ixgbe_get_bus_info_generic - Generic set PCI bus info
580 712 * @hw: pointer to hardware structure
581 713 *
582 714 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
583 715 **/
584 716 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
585 717 {
586 718 struct ixgbe_mac_info *mac = &hw->mac;
587 719 u16 link_status;
588 720
589 721 DEBUGFUNC("ixgbe_get_bus_info_generic");
590 722
591 723 hw->bus.type = ixgbe_bus_type_pci_express;
592 724
593 725 /* Get the negotiated link width and speed from PCI config space */
594 726 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
595 727
596 728 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
597 729 case IXGBE_PCI_LINK_WIDTH_1:
598 730 hw->bus.width = ixgbe_bus_width_pcie_x1;
599 731 break;
600 732 case IXGBE_PCI_LINK_WIDTH_2:
601 733 hw->bus.width = ixgbe_bus_width_pcie_x2;
602 734 break;
603 735 case IXGBE_PCI_LINK_WIDTH_4:
604 736 hw->bus.width = ixgbe_bus_width_pcie_x4;
605 737 break;
606 738 case IXGBE_PCI_LINK_WIDTH_8:
607 739 hw->bus.width = ixgbe_bus_width_pcie_x8;
608 740 break;
609 741 default:
610 742 hw->bus.width = ixgbe_bus_width_unknown;
|
↓ open down ↓ |
85 lines elided |
↑ open up ↑ |
611 743 break;
612 744 }
613 745
614 746 switch (link_status & IXGBE_PCI_LINK_SPEED) {
615 747 case IXGBE_PCI_LINK_SPEED_2500:
616 748 hw->bus.speed = ixgbe_bus_speed_2500;
617 749 break;
618 750 case IXGBE_PCI_LINK_SPEED_5000:
619 751 hw->bus.speed = ixgbe_bus_speed_5000;
620 752 break;
753 + case IXGBE_PCI_LINK_SPEED_8000:
754 + hw->bus.speed = ixgbe_bus_speed_8000;
755 + break;
621 756 default:
622 757 hw->bus.speed = ixgbe_bus_speed_unknown;
623 758 break;
624 759 }
625 760
626 761 mac->ops.set_lan_id(hw);
627 762
628 763 return IXGBE_SUCCESS;
629 764 }
630 765
631 766 /**
632 767 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
633 768 * @hw: pointer to the HW structure
634 769 *
635 770 * Determines the LAN function id by reading memory-mapped registers
636 771 * and swaps the port value if requested.
637 772 **/
638 773 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
639 774 {
640 775 struct ixgbe_bus_info *bus = &hw->bus;
641 776 u32 reg;
642 777
643 778 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
644 779
645 780 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
646 781 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
647 782 bus->lan_id = bus->func;
648 783
649 784 /* check for a port swap */
650 785 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
651 786 if (reg & IXGBE_FACTPS_LFS)
652 787 bus->func ^= 0x1;
653 788 }
654 789
655 790 /**
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
656 791 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
657 792 * @hw: pointer to hardware structure
658 793 *
659 794 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
660 795 * disables transmit and receive units. The adapter_stopped flag is used by
661 796 * the shared code and drivers to determine if the adapter is in a stopped
662 797 * state and should not touch the hardware.
663 798 **/
664 799 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
665 800 {
666 - u32 number_of_queues;
667 801 u32 reg_val;
668 802 u16 i;
669 803
670 804 DEBUGFUNC("ixgbe_stop_adapter_generic");
671 805
672 806 /*
673 807 * Set the adapter_stopped flag so other driver functions stop touching
674 808 * the hardware
675 809 */
676 810 hw->adapter_stopped = TRUE;
677 811
678 812 /* Disable the receive unit */
679 - reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
680 - reg_val &= ~(IXGBE_RXCTRL_RXEN);
681 - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
682 - IXGBE_WRITE_FLUSH(hw);
683 - msec_delay(2);
813 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
684 814
685 - /* Clear interrupt mask to stop from interrupts being generated */
815 + /* Clear interrupt mask to stop interrupts from being generated */
686 816 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
687 817
688 - /* Clear any pending interrupts */
689 - (void) IXGBE_READ_REG(hw, IXGBE_EICR);
818 + /* Clear any pending interrupts, flush previous writes */
819 + IXGBE_READ_REG(hw, IXGBE_EICR);
690 820
691 821 /* Disable the transmit unit. Each queue must be disabled. */
692 - number_of_queues = hw->mac.max_tx_queues;
693 - for (i = 0; i < number_of_queues; i++) {
694 - reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
695 - if (reg_val & IXGBE_TXDCTL_ENABLE) {
696 - reg_val &= ~IXGBE_TXDCTL_ENABLE;
697 - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
698 - }
822 + for (i = 0; i < hw->mac.max_tx_queues; i++)
823 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
824 +
825 + /* Disable the receive unit by stopping each queue */
826 + for (i = 0; i < hw->mac.max_rx_queues; i++) {
827 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
828 + reg_val &= ~IXGBE_RXDCTL_ENABLE;
829 + reg_val |= IXGBE_RXDCTL_SWFLSH;
830 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
699 831 }
700 832
833 + /* flush all queues disables */
834 + IXGBE_WRITE_FLUSH(hw);
835 + msec_delay(2);
836 +
701 837 /*
702 838 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
703 839 * access and verify no pending requests
704 840 */
705 - (void) ixgbe_disable_pcie_master(hw);
706 -
707 - return IXGBE_SUCCESS;
841 + return ixgbe_disable_pcie_master(hw);
708 842 }
709 843
710 844 /**
711 845 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
712 846 * @hw: pointer to hardware structure
713 847 * @index: led number to turn on
714 848 **/
715 849 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
716 850 {
717 851 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
718 852
719 853 DEBUGFUNC("ixgbe_led_on_generic");
720 854
721 855 /* To turn on the LED, set mode to ON. */
722 856 led_reg &= ~IXGBE_LED_MODE_MASK(index);
723 857 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
724 858 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
725 859 IXGBE_WRITE_FLUSH(hw);
726 860
727 861 return IXGBE_SUCCESS;
728 862 }
729 863
730 864 /**
731 865 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
732 866 * @hw: pointer to hardware structure
733 867 * @index: led number to turn off
734 868 **/
735 869 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
736 870 {
737 871 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
738 872
739 873 DEBUGFUNC("ixgbe_led_off_generic");
740 874
741 875 /* To turn off the LED, set mode to OFF. */
742 876 led_reg &= ~IXGBE_LED_MODE_MASK(index);
743 877 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
744 878 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
745 879 IXGBE_WRITE_FLUSH(hw);
746 880
747 881 return IXGBE_SUCCESS;
748 882 }
749 883
750 884 /**
751 885 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
752 886 * @hw: pointer to hardware structure
753 887 *
754 888 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
755 889 * ixgbe_hw struct in order to set up EEPROM access.
756 890 **/
757 891 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
758 892 {
759 893 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
|
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
760 894 u32 eec;
761 895 u16 eeprom_size;
762 896
763 897 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
764 898
765 899 if (eeprom->type == ixgbe_eeprom_uninitialized) {
766 900 eeprom->type = ixgbe_eeprom_none;
767 901 /* Set default semaphore delay to 10ms which is a well
768 902 * tested value */
769 903 eeprom->semaphore_delay = 10;
904 + /* Clear EEPROM page size, it will be initialized as needed */
905 + eeprom->word_page_size = 0;
770 906
771 907 /*
772 908 * Check for EEPROM present first.
773 909 * If not present leave as none
774 910 */
775 911 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
776 912 if (eec & IXGBE_EEC_PRES) {
777 913 eeprom->type = ixgbe_eeprom_spi;
778 914
779 915 /*
780 916 * SPI EEPROM is assumed here. This code would need to
781 917 * change if a future EEPROM is not SPI.
782 918 */
783 919 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
784 - IXGBE_EEC_SIZE_SHIFT);
920 + IXGBE_EEC_SIZE_SHIFT);
785 921 eeprom->word_size = 1 << (eeprom_size +
786 - IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
922 + IXGBE_EEPROM_WORD_SIZE_SHIFT);
787 923 }
788 924
789 925 if (eec & IXGBE_EEC_ADDR_SIZE)
790 926 eeprom->address_bits = 16;
791 927 else
792 928 eeprom->address_bits = 8;
793 929 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
794 - "%d\n", eeprom->type, eeprom->word_size,
795 - eeprom->address_bits);
930 + "%d\n", eeprom->type, eeprom->word_size,
931 + eeprom->address_bits);
796 932 }
797 933
798 934 return IXGBE_SUCCESS;
799 935 }
800 936
801 937 /**
802 - * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
938 + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
803 939 * @hw: pointer to hardware structure
804 - * @offset: offset within the EEPROM to be written to
805 - * @data: 16 bit word to be written to the EEPROM
940 + * @offset: offset within the EEPROM to write
941 + * @words: number of word(s)
942 + * @data: 16 bit word(s) to write to EEPROM
806 943 *
807 - * If ixgbe_eeprom_update_checksum is not called after this function, the
808 - * EEPROM will most likely contain an invalid checksum.
944 + * Writes 16 bit word(s) to EEPROM through bit-bang method
809 945 **/
810 -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
946 +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
947 + u16 words, u16 *data)
811 948 {
812 - s32 status;
813 - u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
949 + s32 status = IXGBE_SUCCESS;
950 + u16 i, count;
814 951
815 - DEBUGFUNC("ixgbe_write_eeprom_generic");
952 + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
816 953
817 954 hw->eeprom.ops.init_params(hw);
818 955
819 - if (offset >= hw->eeprom.word_size) {
956 + if (words == 0) {
957 + status = IXGBE_ERR_INVALID_ARGUMENT;
958 + goto out;
959 + }
960 +
961 + if (offset + words > hw->eeprom.word_size) {
820 962 status = IXGBE_ERR_EEPROM;
821 963 goto out;
822 964 }
823 965
966 + /*
967 + * The EEPROM page size cannot be queried from the chip. We do lazy
968 + * initialization. It is worth doing when we write a large buffer.
969 + */
970 + if ((hw->eeprom.word_page_size == 0) &&
971 + (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
972 + ixgbe_detect_eeprom_page_size_generic(hw, offset);
973 +
974 + /*
975 + * We cannot hold synchronization semaphores for too long
976 + * to avoid other entity starvation. However it is more efficient
977 + * to write in bursts than synchronizing access for each word.
978 + */
979 + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
980 + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
981 + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
982 + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
983 + count, &data[i]);
984 +
985 + if (status != IXGBE_SUCCESS)
986 + break;
987 + }
988 +
989 +out:
990 + return status;
991 +}
992 +
993 +/**
994 + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
995 + * @hw: pointer to hardware structure
996 + * @offset: offset within the EEPROM to be written to
997 + * @words: number of word(s)
998 + * @data: 16 bit word(s) to be written to the EEPROM
999 + *
1000 + * If ixgbe_eeprom_update_checksum is not called after this function, the
1001 + * EEPROM will most likely contain an invalid checksum.
1002 + **/
1003 +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1004 + u16 words, u16 *data)
1005 +{
1006 + s32 status;
1007 + u16 word;
1008 + u16 page_size;
1009 + u16 i;
1010 + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1011 +
1012 + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1013 +
824 1014 /* Prepare the EEPROM for writing */
825 1015 status = ixgbe_acquire_eeprom(hw);
826 1016
827 1017 if (status == IXGBE_SUCCESS) {
828 1018 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
829 1019 ixgbe_release_eeprom(hw);
830 1020 status = IXGBE_ERR_EEPROM;
831 1021 }
832 1022 }
833 1023
834 1024 if (status == IXGBE_SUCCESS) {
835 - ixgbe_standby_eeprom(hw);
1025 + for (i = 0; i < words; i++) {
1026 + ixgbe_standby_eeprom(hw);
836 1027
837 - /* Send the WRITE ENABLE command (8 bit opcode ) */
838 - ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
839 - IXGBE_EEPROM_OPCODE_BITS);
1028 + /* Send the WRITE ENABLE command (8 bit opcode ) */
1029 + ixgbe_shift_out_eeprom_bits(hw,
1030 + IXGBE_EEPROM_WREN_OPCODE_SPI,
1031 + IXGBE_EEPROM_OPCODE_BITS);
840 1032
841 - ixgbe_standby_eeprom(hw);
1033 + ixgbe_standby_eeprom(hw);
842 1034
843 - /*
844 - * Some SPI eeproms use the 8th address bit embedded in the
845 - * opcode
846 - */
847 - if ((hw->eeprom.address_bits == 8) && (offset >= 128))
848 - write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1035 + /*
1036 + * Some SPI eeproms use the 8th address bit embedded
1037 + * in the opcode
1038 + */
1039 + if ((hw->eeprom.address_bits == 8) &&
1040 + ((offset + i) >= 128))
1041 + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
849 1042
850 - /* Send the Write command (8-bit opcode + addr) */
851 - ixgbe_shift_out_eeprom_bits(hw, write_opcode,
852 - IXGBE_EEPROM_OPCODE_BITS);
853 - ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
854 - hw->eeprom.address_bits);
1043 + /* Send the Write command (8-bit opcode + addr) */
1044 + ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1045 + IXGBE_EEPROM_OPCODE_BITS);
1046 + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1047 + hw->eeprom.address_bits);
855 1048
856 - /* Send the data */
857 - data = (data >> 8) | (data << 8);
858 - ixgbe_shift_out_eeprom_bits(hw, data, 16);
859 - ixgbe_standby_eeprom(hw);
1049 + page_size = hw->eeprom.word_page_size;
860 1050
1051 + /* Send the data in burst via SPI */
1052 + do {
1053 + word = data[i];
1054 + word = (word >> 8) | (word << 8);
1055 + ixgbe_shift_out_eeprom_bits(hw, word, 16);
1056 +
1057 + if (page_size == 0)
1058 + break;
1059 +
1060 + /* do not wrap around page */
1061 + if (((offset + i) & (page_size - 1)) ==
1062 + (page_size - 1))
1063 + break;
1064 + } while (++i < words);
1065 +
1066 + ixgbe_standby_eeprom(hw);
1067 + msec_delay(10);
1068 + }
861 1069 /* Done with writing - release the EEPROM */
862 1070 ixgbe_release_eeprom(hw);
863 1071 }
864 1072
865 -out:
866 1073 return status;
867 1074 }
868 1075
869 1076 /**
870 - * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1077 + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
871 1078 * @hw: pointer to hardware structure
872 - * @offset: offset within the EEPROM to be read
873 - * @data: read 16 bit value from EEPROM
1079 + * @offset: offset within the EEPROM to be written to
1080 + * @data: 16 bit word to be written to the EEPROM
874 1081 *
875 - * Reads 16 bit value from EEPROM through bit-bang method
1082 + * If ixgbe_eeprom_update_checksum is not called after this function, the
1083 + * EEPROM will most likely contain an invalid checksum.
876 1084 **/
877 -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
878 - u16 *data)
1085 +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
879 1086 {
880 1087 s32 status;
881 - u16 word_in;
882 - u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
883 1088
884 - DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1089 + DEBUGFUNC("ixgbe_write_eeprom_generic");
885 1090
886 1091 hw->eeprom.ops.init_params(hw);
887 1092
888 1093 if (offset >= hw->eeprom.word_size) {
889 1094 status = IXGBE_ERR_EEPROM;
890 1095 goto out;
891 1096 }
892 1097
1098 + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1099 +
1100 +out:
1101 + return status;
1102 +}
1103 +
1104 +/**
1105 + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1106 + * @hw: pointer to hardware structure
1107 + * @offset: offset within the EEPROM to be read
1108 + * @data: read 16 bit words(s) from EEPROM
1109 + * @words: number of word(s)
1110 + *
1111 + * Reads 16 bit word(s) from EEPROM through bit-bang method
1112 + **/
1113 +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1114 + u16 words, u16 *data)
1115 +{
1116 + s32 status = IXGBE_SUCCESS;
1117 + u16 i, count;
1118 +
1119 + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1120 +
1121 + hw->eeprom.ops.init_params(hw);
1122 +
1123 + if (words == 0) {
1124 + status = IXGBE_ERR_INVALID_ARGUMENT;
1125 + goto out;
1126 + }
1127 +
1128 + if (offset + words > hw->eeprom.word_size) {
1129 + status = IXGBE_ERR_EEPROM;
1130 + goto out;
1131 + }
1132 +
1133 + /*
1134 + * We cannot hold synchronization semaphores for too long
1135 + * to avoid other entity starvation. However it is more efficient
1136 + * to read in bursts than synchronizing access for each word.
1137 + */
1138 + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1139 + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1140 + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1141 +
1142 + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1143 + count, &data[i]);
1144 +
1145 + if (status != IXGBE_SUCCESS)
1146 + break;
1147 + }
1148 +
1149 +out:
1150 + return status;
1151 +}
1152 +
1153 +/**
1154 + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1155 + * @hw: pointer to hardware structure
1156 + * @offset: offset within the EEPROM to be read
1157 + * @words: number of word(s)
1158 + * @data: read 16 bit word(s) from EEPROM
1159 + *
1160 + * Reads 16 bit word(s) from EEPROM through bit-bang method
1161 + **/
1162 +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1163 + u16 words, u16 *data)
1164 +{
1165 + s32 status;
1166 + u16 word_in;
1167 + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1168 + u16 i;
1169 +
1170 + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1171 +
893 1172 /* Prepare the EEPROM for reading */
894 1173 status = ixgbe_acquire_eeprom(hw);
895 1174
896 1175 if (status == IXGBE_SUCCESS) {
897 1176 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
898 1177 ixgbe_release_eeprom(hw);
899 1178 status = IXGBE_ERR_EEPROM;
900 1179 }
901 1180 }
902 1181
903 1182 if (status == IXGBE_SUCCESS) {
904 - ixgbe_standby_eeprom(hw);
1183 + for (i = 0; i < words; i++) {
1184 + ixgbe_standby_eeprom(hw);
1185 + /*
1186 + * Some SPI eeproms use the 8th address bit embedded
1187 + * in the opcode
1188 + */
1189 + if ((hw->eeprom.address_bits == 8) &&
1190 + ((offset + i) >= 128))
1191 + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
905 1192
906 - /*
907 - * Some SPI eeproms use the 8th address bit embedded in the
908 - * opcode
909 - */
910 - if ((hw->eeprom.address_bits == 8) && (offset >= 128))
911 - read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1193 + /* Send the READ command (opcode + addr) */
1194 + ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1195 + IXGBE_EEPROM_OPCODE_BITS);
1196 + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1197 + hw->eeprom.address_bits);
912 1198
913 - /* Send the READ command (opcode + addr) */
914 - ixgbe_shift_out_eeprom_bits(hw, read_opcode,
915 - IXGBE_EEPROM_OPCODE_BITS);
916 - ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
917 - hw->eeprom.address_bits);
1199 + /* Read the data. */
1200 + word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1201 + data[i] = (word_in >> 8) | (word_in << 8);
1202 + }
918 1203
919 - /* Read the data. */
920 - word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
921 - *data = (word_in >> 8) | (word_in << 8);
922 -
923 1204 /* End this read operation */
924 1205 ixgbe_release_eeprom(hw);
925 1206 }
926 1207
1208 + return status;
1209 +}
1210 +
1211 +/**
1212 + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1213 + * @hw: pointer to hardware structure
1214 + * @offset: offset within the EEPROM to be read
1215 + * @data: read 16 bit value from EEPROM
1216 + *
1217 + * Reads 16 bit value from EEPROM through bit-bang method
1218 + **/
1219 +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1220 + u16 *data)
1221 +{
1222 + s32 status;
1223 +
1224 + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1225 +
1226 + hw->eeprom.ops.init_params(hw);
1227 +
1228 + if (offset >= hw->eeprom.word_size) {
1229 + status = IXGBE_ERR_EEPROM;
1230 + goto out;
1231 + }
1232 +
1233 + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1234 +
927 1235 out:
928 1236 return status;
929 1237 }
930 1238
931 1239 /**
932 - * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1240 + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
933 1241 * @hw: pointer to hardware structure
934 - * @offset: offset of word in the EEPROM to read
935 - * @data: word read from the EEPROM
1242 + * @offset: offset of word in the EEPROM to read
1243 + * @words: number of word(s)
1244 + * @data: 16 bit word(s) from the EEPROM
936 1245 *
937 - * Reads a 16 bit word from the EEPROM using the EERD register.
1246 + * Reads a 16 bit word(s) from the EEPROM using the EERD register.
938 1247 **/
939 -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1248 +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1249 + u16 words, u16 *data)
940 1250 {
941 1251 u32 eerd;
942 - s32 status;
1252 + s32 status = IXGBE_SUCCESS;
1253 + u32 i;
943 1254
944 - DEBUGFUNC("ixgbe_read_eerd_generic");
1255 + DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
945 1256
946 1257 hw->eeprom.ops.init_params(hw);
947 1258
1259 + if (words == 0) {
1260 + status = IXGBE_ERR_INVALID_ARGUMENT;
1261 + goto out;
1262 + }
1263 +
948 1264 if (offset >= hw->eeprom.word_size) {
949 1265 status = IXGBE_ERR_EEPROM;
950 1266 goto out;
951 1267 }
952 1268
953 - eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
954 - IXGBE_EEPROM_RW_REG_START;
1269 + for (i = 0; i < words; i++) {
1270 + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
1271 + IXGBE_EEPROM_RW_REG_START;
955 1272
956 - IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
957 - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1273 + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1274 + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
958 1275
959 - if (status == IXGBE_SUCCESS)
960 - *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
961 - IXGBE_EEPROM_RW_REG_DATA);
962 - else
963 - DEBUGOUT("Eeprom read timed out\n");
1276 + if (status == IXGBE_SUCCESS) {
1277 + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1278 + IXGBE_EEPROM_RW_REG_DATA);
1279 + } else {
1280 + DEBUGOUT("Eeprom read timed out\n");
1281 + goto out;
1282 + }
1283 + }
1284 +out:
1285 + return status;
1286 +}
964 1287
1288 +/**
1289 + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1290 + * @hw: pointer to hardware structure
1291 + * @offset: offset within the EEPROM to be used as a scratch pad
1292 + *
1293 + * Discover EEPROM page size by writing marching data at given offset.
1294 + * This function is called only when we are writing a new large buffer
1295 + * at given offset so the data would be overwritten anyway.
1296 + **/
1297 +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1298 + u16 offset)
1299 +{
1300 + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1301 + s32 status = IXGBE_SUCCESS;
1302 + u16 i;
1303 +
1304 + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1305 +
1306 + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1307 + data[i] = i;
1308 +
1309 + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1310 + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1311 + IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1312 + hw->eeprom.word_page_size = 0;
1313 + if (status != IXGBE_SUCCESS)
1314 + goto out;
1315 +
1316 + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1317 + if (status != IXGBE_SUCCESS)
1318 + goto out;
1319 +
1320 + /*
1321 + * When writing in burst more than the actual page size
1322 + * EEPROM address wraps around current page.
1323 + */
1324 + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1325 +
1326 + DEBUGOUT1("Detected EEPROM page size = %d words.",
1327 + hw->eeprom.word_page_size);
965 1328 out:
966 1329 return status;
967 1330 }
968 1331
969 1332 /**
970 - * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1333 + * ixgbe_read_eerd_generic - Read EEPROM word using EERD
971 1334 * @hw: pointer to hardware structure
1335 + * @offset: offset of word in the EEPROM to read
1336 + * @data: word read from the EEPROM
1337 + *
1338 + * Reads a 16 bit word from the EEPROM using the EERD register.
1339 + **/
1340 +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1341 +{
1342 + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1343 +}
1344 +
1345 +/**
1346 + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1347 + * @hw: pointer to hardware structure
972 1348 * @offset: offset of word in the EEPROM to write
973 - * @data: word write to the EEPROM
1349 + * @words: number of word(s)
1350 + * @data: word(s) write to the EEPROM
974 1351 *
975 - * Write a 16 bit word to the EEPROM using the EEWR register.
1352 + * Write a 16 bit word(s) to the EEPROM using the EEWR register.
976 1353 **/
977 -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1354 +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1355 + u16 words, u16 *data)
978 1356 {
979 1357 u32 eewr;
980 - s32 status;
1358 + s32 status = IXGBE_SUCCESS;
1359 + u16 i;
981 1360
982 1361 DEBUGFUNC("ixgbe_write_eewr_generic");
983 1362
984 1363 hw->eeprom.ops.init_params(hw);
985 1364
1365 + if (words == 0) {
1366 + status = IXGBE_ERR_INVALID_ARGUMENT;
1367 + goto out;
1368 + }
1369 +
986 1370 if (offset >= hw->eeprom.word_size) {
987 1371 status = IXGBE_ERR_EEPROM;
988 1372 goto out;
989 1373 }
990 1374
991 - eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
992 - (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
1375 + for (i = 0; i < words; i++) {
1376 + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1377 + (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1378 + IXGBE_EEPROM_RW_REG_START;
993 1379
994 - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
995 - if (status != IXGBE_SUCCESS) {
996 - DEBUGOUT("Eeprom write EEWR timed out\n");
997 - goto out;
998 - }
1380 + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1381 + if (status != IXGBE_SUCCESS) {
1382 + DEBUGOUT("Eeprom write EEWR timed out\n");
1383 + goto out;
1384 + }
999 1385
1000 - IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1386 + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1001 1387
1002 - status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1003 - if (status != IXGBE_SUCCESS) {
1004 - DEBUGOUT("Eeprom write EEWR timed out\n");
1005 - goto out;
1388 + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1389 + if (status != IXGBE_SUCCESS) {
1390 + DEBUGOUT("Eeprom write EEWR timed out\n");
1391 + goto out;
1392 + }
1006 1393 }
1007 1394
1008 1395 out:
1009 1396 return status;
1010 1397 }
1011 1398
1012 1399 /**
1400 + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1401 + * @hw: pointer to hardware structure
1402 + * @offset: offset of word in the EEPROM to write
1403 + * @data: word write to the EEPROM
1404 + *
1405 + * Write a 16 bit word to the EEPROM using the EEWR register.
1406 + **/
1407 +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1408 +{
1409 + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1410 +}
1411 +
1412 +/**
1013 1413 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1014 1414 * @hw: pointer to hardware structure
1015 1415 * @ee_reg: EEPROM flag for polling
1016 1416 *
1017 1417 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1018 1418 * read or write is done respectively.
1019 1419 **/
1020 1420 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1021 1421 {
1022 1422 u32 i;
1023 1423 u32 reg;
1024 1424 s32 status = IXGBE_ERR_EEPROM;
1025 1425
1026 1426 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1027 1427
1028 1428 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1029 1429 if (ee_reg == IXGBE_NVM_POLL_READ)
1030 1430 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1031 1431 else
1032 1432 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1033 1433
1034 1434 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1035 1435 status = IXGBE_SUCCESS;
1036 1436 break;
1037 1437 }
1038 1438 usec_delay(5);
1039 1439 }
1040 1440 return status;
1041 1441 }
1042 1442
1043 1443 /**
1044 1444 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1045 1445 * @hw: pointer to hardware structure
1046 1446 *
1047 1447 * Prepares EEPROM for access using bit-bang method. This function should
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
1048 1448 * be called before issuing a command to the EEPROM.
1049 1449 **/
1050 1450 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1051 1451 {
1052 1452 s32 status = IXGBE_SUCCESS;
1053 1453 u32 eec;
1054 1454 u32 i;
1055 1455
1056 1456 DEBUGFUNC("ixgbe_acquire_eeprom");
1057 1457
1058 - if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
1458 + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1459 + != IXGBE_SUCCESS)
1059 1460 status = IXGBE_ERR_SWFW_SYNC;
1060 1461
1061 1462 if (status == IXGBE_SUCCESS) {
1062 1463 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1063 1464
1064 1465 /* Request EEPROM Access */
1065 1466 eec |= IXGBE_EEC_REQ;
1066 1467 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1067 1468
1068 1469 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1069 1470 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1070 1471 if (eec & IXGBE_EEC_GNT)
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
1071 1472 break;
1072 1473 usec_delay(5);
1073 1474 }
1074 1475
1075 1476 /* Release if grant not acquired */
1076 1477 if (!(eec & IXGBE_EEC_GNT)) {
1077 1478 eec &= ~IXGBE_EEC_REQ;
1078 1479 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1079 1480 DEBUGOUT("Could not acquire EEPROM grant\n");
1080 1481
1081 - ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1482 + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1082 1483 status = IXGBE_ERR_EEPROM;
1083 1484 }
1084 1485
1085 1486 /* Setup EEPROM for Read/Write */
1086 1487 if (status == IXGBE_SUCCESS) {
1087 1488 /* Clear CS and SK */
1088 1489 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1089 1490 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1090 1491 IXGBE_WRITE_FLUSH(hw);
1091 1492 usec_delay(1);
1092 1493 }
1093 1494 }
1094 1495 return status;
1095 1496 }
1096 1497
1097 1498 /**
1098 1499 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1099 1500 * @hw: pointer to hardware structure
1100 1501 *
1101 1502 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1102 1503 **/
1103 1504 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1104 1505 {
1105 1506 s32 status = IXGBE_ERR_EEPROM;
1106 1507 u32 timeout = 2000;
1107 1508 u32 i;
1108 1509 u32 swsm;
1109 1510
1110 1511 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1111 1512
1112 1513
1113 1514 /* Get SMBI software semaphore between device drivers first */
1114 1515 for (i = 0; i < timeout; i++) {
1115 1516 /*
1116 1517 * If the SMBI bit is 0 when we read it, then the bit will be
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
1117 1518 * set and we have the semaphore
1118 1519 */
1119 1520 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1120 1521 if (!(swsm & IXGBE_SWSM_SMBI)) {
1121 1522 status = IXGBE_SUCCESS;
1122 1523 break;
1123 1524 }
1124 1525 usec_delay(50);
1125 1526 }
1126 1527
1528 + if (i == timeout) {
1529 + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1530 + "not granted.\n");
1531 + /*
1532 + * this release is particularly important because our attempts
1533 + * above to get the semaphore may have succeeded, and if there
1534 + * was a timeout, we should unconditionally clear the semaphore
1535 + * bits to free the driver to make progress
1536 + */
1537 + ixgbe_release_eeprom_semaphore(hw);
1538 +
1539 + usec_delay(50);
1540 + /*
1541 + * one last try
1542 + * If the SMBI bit is 0 when we read it, then the bit will be
1543 + * set and we have the semaphore
1544 + */
1545 + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1546 + if (!(swsm & IXGBE_SWSM_SMBI))
1547 + status = IXGBE_SUCCESS;
1548 + }
1549 +
1127 1550 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1128 1551 if (status == IXGBE_SUCCESS) {
1129 1552 for (i = 0; i < timeout; i++) {
1130 1553 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1131 1554
1132 1555 /* Set the SW EEPROM semaphore bit to request access */
1133 1556 swsm |= IXGBE_SWSM_SWESMBI;
1134 1557 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1135 1558
1136 1559 /*
1137 1560 * If we set the bit successfully then we got the
1138 1561 * semaphore.
1139 1562 */
1140 1563 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1141 1564 if (swsm & IXGBE_SWSM_SWESMBI)
1142 1565 break;
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
1143 1566
1144 1567 usec_delay(50);
1145 1568 }
1146 1569
1147 1570 /*
1148 1571 * Release semaphores and return error if SW EEPROM semaphore
1149 1572 * was not granted because we don't have access to the EEPROM
1150 1573 */
1151 1574 if (i >= timeout) {
1152 1575 DEBUGOUT("SWESMBI Software EEPROM semaphore "
1153 - "not granted.\n");
1576 + "not granted.\n");
1154 1577 ixgbe_release_eeprom_semaphore(hw);
1155 1578 status = IXGBE_ERR_EEPROM;
1156 1579 }
1157 1580 } else {
1158 1581 DEBUGOUT("Software semaphore SMBI between device drivers "
1159 - "not granted.\n");
1582 + "not granted.\n");
1160 1583 }
1161 1584
1162 1585 return status;
1163 1586 }
1164 1587
1165 1588 /**
1166 1589 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1167 1590 * @hw: pointer to hardware structure
1168 1591 *
1169 1592 * This function clears hardware semaphore bits.
1170 1593 **/
1171 1594 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1172 1595 {
1173 1596 u32 swsm;
1174 1597
1175 1598 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1176 1599
1177 1600 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1178 1601
1179 1602 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1180 1603 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1181 1604 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1182 1605 IXGBE_WRITE_FLUSH(hw);
1183 1606 }
1184 1607
1185 1608 /**
1186 1609 * ixgbe_ready_eeprom - Polls for EEPROM ready
1187 1610 * @hw: pointer to hardware structure
1188 1611 **/
1189 1612 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1190 1613 {
1191 1614 s32 status = IXGBE_SUCCESS;
1192 1615 u16 i;
1193 1616 u8 spi_stat_reg;
1194 1617
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
1195 1618 DEBUGFUNC("ixgbe_ready_eeprom");
1196 1619
1197 1620 /*
1198 1621 * Read "Status Register" repeatedly until the LSB is cleared. The
1199 1622 * EEPROM will signal that the command has been completed by clearing
1200 1623 * bit 0 of the internal status register. If it's not cleared within
1201 1624 * 5 milliseconds, then error out.
1202 1625 */
1203 1626 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1204 1627 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1205 - IXGBE_EEPROM_OPCODE_BITS);
1628 + IXGBE_EEPROM_OPCODE_BITS);
1206 1629 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1207 1630 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1208 1631 break;
1209 1632
1210 1633 usec_delay(5);
1211 1634 ixgbe_standby_eeprom(hw);
1212 1635 };
1213 1636
1214 1637 /*
1215 1638 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1216 1639 * devices (and only 0-5mSec on 5V devices)
1217 1640 */
1218 1641 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1219 1642 DEBUGOUT("SPI EEPROM Status error\n");
1220 1643 status = IXGBE_ERR_EEPROM;
1221 1644 }
1222 1645
1223 1646 return status;
1224 1647 }
1225 1648
1226 1649 /**
1227 1650 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1228 1651 * @hw: pointer to hardware structure
1229 1652 **/
1230 1653 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1231 1654 {
1232 1655 u32 eec;
1233 1656
1234 1657 DEBUGFUNC("ixgbe_standby_eeprom");
1235 1658
1236 1659 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1237 1660
1238 1661 /* Toggle CS to flush commands */
1239 1662 eec |= IXGBE_EEC_CS;
1240 1663 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1241 1664 IXGBE_WRITE_FLUSH(hw);
1242 1665 usec_delay(1);
1243 1666 eec &= ~IXGBE_EEC_CS;
1244 1667 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1245 1668 IXGBE_WRITE_FLUSH(hw);
|
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
1246 1669 usec_delay(1);
1247 1670 }
1248 1671
1249 1672 /**
1250 1673 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1251 1674 * @hw: pointer to hardware structure
1252 1675 * @data: data to send to the EEPROM
1253 1676 * @count: number of bits to shift out
1254 1677 **/
1255 1678 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1256 - u16 count)
1679 + u16 count)
1257 1680 {
1258 1681 u32 eec;
1259 1682 u32 mask;
1260 1683 u32 i;
1261 1684
1262 1685 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1263 1686
1264 1687 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1265 1688
1266 1689 /*
1267 1690 * Mask is used to shift "count" bits of "data" out to the EEPROM
1268 1691 * one bit at a time. Determine the starting bit based on count
1269 1692 */
1270 1693 mask = 0x01 << (count - 1);
1271 1694
1272 1695 for (i = 0; i < count; i++) {
1273 1696 /*
1274 1697 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1275 1698 * "1", and then raising and then lowering the clock (the SK
1276 1699 * bit controls the clock input to the EEPROM). A "0" is
1277 1700 * shifted out to the EEPROM by setting "DI" to "0" and then
1278 1701 * raising and then lowering the clock.
1279 1702 */
1280 1703 if (data & mask)
1281 1704 eec |= IXGBE_EEC_DI;
1282 1705 else
1283 1706 eec &= ~IXGBE_EEC_DI;
1284 1707
1285 1708 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1286 1709 IXGBE_WRITE_FLUSH(hw);
1287 1710
1288 1711 usec_delay(1);
1289 1712
1290 1713 ixgbe_raise_eeprom_clk(hw, &eec);
1291 1714 ixgbe_lower_eeprom_clk(hw, &eec);
1292 1715
1293 1716 /*
1294 1717 * Shift mask to signify next bit of data to shift in to the
1295 1718 * EEPROM
1296 1719 */
1297 1720 mask = mask >> 1;
1298 1721 };
1299 1722
1300 1723 /* We leave the "DI" bit set to "0" when we leave this routine. */
1301 1724 eec &= ~IXGBE_EEC_DI;
1302 1725 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1303 1726 IXGBE_WRITE_FLUSH(hw);
1304 1727 }
1305 1728
1306 1729 /**
1307 1730 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1308 1731 * @hw: pointer to hardware structure
1309 1732 **/
1310 1733 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1311 1734 {
1312 1735 u32 eec;
1313 1736 u32 i;
1314 1737 u16 data = 0;
1315 1738
1316 1739 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1317 1740
1318 1741 /*
1319 1742 * In order to read a register from the EEPROM, we need to shift
1320 1743 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1321 1744 * the clock input to the EEPROM (setting the SK bit), and then reading
1322 1745 * the value of the "DO" bit. During this "shifting in" process the
1323 1746 * "DI" bit should always be clear.
1324 1747 */
1325 1748 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1326 1749
1327 1750 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1328 1751
1329 1752 for (i = 0; i < count; i++) {
1330 1753 data = data << 1;
1331 1754 ixgbe_raise_eeprom_clk(hw, &eec);
1332 1755
1333 1756 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1334 1757
1335 1758 eec &= ~(IXGBE_EEC_DI);
1336 1759 if (eec & IXGBE_EEC_DO)
1337 1760 data |= 1;
1338 1761
1339 1762 ixgbe_lower_eeprom_clk(hw, &eec);
1340 1763 }
1341 1764
1342 1765 return data;
1343 1766 }
1344 1767
1345 1768 /**
1346 1769 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1347 1770 * @hw: pointer to hardware structure
1348 1771 * @eec: EEC register's current value
1349 1772 **/
1350 1773 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1351 1774 {
1352 1775 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1353 1776
1354 1777 /*
1355 1778 * Raise the clock input to the EEPROM
1356 1779 * (setting the SK bit), then delay
1357 1780 */
1358 1781 *eec = *eec | IXGBE_EEC_SK;
1359 1782 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1360 1783 IXGBE_WRITE_FLUSH(hw);
1361 1784 usec_delay(1);
1362 1785 }
1363 1786
1364 1787 /**
1365 1788 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1366 1789 * @hw: pointer to hardware structure
1367 1790 * @eecd: EECD's current value
1368 1791 **/
1369 1792 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1370 1793 {
1371 1794 DEBUGFUNC("ixgbe_lower_eeprom_clk");
1372 1795
1373 1796 /*
1374 1797 * Lower the clock input to the EEPROM (clearing the SK bit), then
1375 1798 * delay
1376 1799 */
1377 1800 *eec = *eec & ~IXGBE_EEC_SK;
1378 1801 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1379 1802 IXGBE_WRITE_FLUSH(hw);
1380 1803 usec_delay(1);
1381 1804 }
1382 1805
1383 1806 /**
1384 1807 * ixgbe_release_eeprom - Release EEPROM, release semaphores
1385 1808 * @hw: pointer to hardware structure
1386 1809 **/
1387 1810 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1388 1811 {
1389 1812 u32 eec;
1390 1813
1391 1814 DEBUGFUNC("ixgbe_release_eeprom");
1392 1815
1393 1816 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1394 1817
1395 1818 eec |= IXGBE_EEC_CS; /* Pull CS high */
1396 1819 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
|
↓ open down ↓ |
130 lines elided |
↑ open up ↑ |
1397 1820
1398 1821 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1399 1822 IXGBE_WRITE_FLUSH(hw);
1400 1823
1401 1824 usec_delay(1);
1402 1825
1403 1826 /* Stop requesting EEPROM access */
1404 1827 eec &= ~IXGBE_EEC_REQ;
1405 1828 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1406 1829
1407 - ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1830 + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1408 1831
1409 1832 /* Delay before attempt to obtain semaphore again to allow FW access */
1410 1833 msec_delay(hw->eeprom.semaphore_delay);
1411 1834 }
1412 1835
1413 1836 /**
1414 1837 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1415 1838 * @hw: pointer to hardware structure
1416 1839 **/
1417 1840 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1418 1841 {
1419 1842 u16 i;
1420 1843 u16 j;
1421 1844 u16 checksum = 0;
1422 1845 u16 length = 0;
1423 1846 u16 pointer = 0;
1424 1847 u16 word = 0;
1425 1848
1426 1849 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
1427 1850
1428 1851 /* Include 0x0-0x3F in the checksum */
1429 1852 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1430 1853 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
1431 1854 DEBUGOUT("EEPROM read failed\n");
1432 1855 break;
1433 1856 }
1434 1857 checksum += word;
1435 1858 }
1436 1859
1437 1860 /* Include all data from pointers except for the fw pointer */
1438 1861 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1439 1862 hw->eeprom.ops.read(hw, i, &pointer);
1440 1863
1441 1864 /* Make sure the pointer seems valid */
1442 1865 if (pointer != 0xFFFF && pointer != 0) {
1443 1866 hw->eeprom.ops.read(hw, pointer, &length);
1444 1867
1445 1868 if (length != 0xFFFF && length != 0) {
1446 1869 for (j = pointer+1; j <= pointer+length; j++) {
1447 1870 hw->eeprom.ops.read(hw, j, &word);
1448 1871 checksum += word;
1449 1872 }
1450 1873 }
1451 1874 }
1452 1875 }
1453 1876
1454 1877 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1455 1878
1456 1879 return checksum;
1457 1880 }
|
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
1458 1881
1459 1882 /**
1460 1883 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1461 1884 * @hw: pointer to hardware structure
1462 1885 * @checksum_val: calculated checksum
1463 1886 *
1464 1887 * Performs checksum calculation and validates the EEPROM checksum. If the
1465 1888 * caller does not need checksum_val, the value can be NULL.
1466 1889 **/
1467 1890 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1468 - u16 *checksum_val)
1891 + u16 *checksum_val)
1469 1892 {
1470 1893 s32 status;
1471 1894 u16 checksum;
1472 1895 u16 read_checksum = 0;
1473 1896
1474 1897 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
1475 1898
1476 1899 /*
1477 1900 * Read the first word from the EEPROM. If this times out or fails, do
1478 1901 * not continue or we could be in for a very long wait while every
1479 1902 * EEPROM read fails
1480 1903 */
1481 1904 status = hw->eeprom.ops.read(hw, 0, &checksum);
1482 1905
1483 1906 if (status == IXGBE_SUCCESS) {
1484 1907 checksum = hw->eeprom.ops.calc_checksum(hw);
1485 1908
1486 1909 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1487 1910
1488 1911 /*
1489 1912 * Verify read checksum from EEPROM is the same as
1490 1913 * calculated checksum
1491 1914 */
1492 1915 if (read_checksum != checksum)
1493 1916 status = IXGBE_ERR_EEPROM_CHECKSUM;
1494 1917
1495 1918 /* If the user cares, return the calculated checksum */
1496 1919 if (checksum_val)
1497 1920 *checksum_val = checksum;
1498 1921 } else {
1499 1922 DEBUGOUT("EEPROM read failed\n");
1500 1923 }
1501 1924
1502 1925 return status;
1503 1926 }
1504 1927
1505 1928 /**
1506 1929 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1507 1930 * @hw: pointer to hardware structure
1508 1931 **/
1509 1932 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1510 1933 {
1511 1934 s32 status;
1512 1935 u16 checksum;
1513 1936
1514 1937 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
1515 1938
|
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
1516 1939 /*
1517 1940 * Read the first word from the EEPROM. If this times out or fails, do
1518 1941 * not continue or we could be in for a very long wait while every
1519 1942 * EEPROM read fails
1520 1943 */
1521 1944 status = hw->eeprom.ops.read(hw, 0, &checksum);
1522 1945
1523 1946 if (status == IXGBE_SUCCESS) {
1524 1947 checksum = hw->eeprom.ops.calc_checksum(hw);
1525 1948 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1526 - checksum);
1949 + checksum);
1527 1950 } else {
1528 1951 DEBUGOUT("EEPROM read failed\n");
1529 1952 }
1530 1953
1531 1954 return status;
1532 1955 }
1533 1956
1534 1957 /**
1535 1958 * ixgbe_validate_mac_addr - Validate MAC address
1536 1959 * @mac_addr: pointer to MAC address.
1537 1960 *
1538 1961 * Tests a MAC address to ensure it is a valid Individual Address
1539 1962 **/
1540 1963 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1541 1964 {
1542 1965 s32 status = IXGBE_SUCCESS;
1543 1966
1544 1967 DEBUGFUNC("ixgbe_validate_mac_addr");
1545 1968
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
1546 1969 /* Make sure it is not a multicast address */
1547 1970 if (IXGBE_IS_MULTICAST(mac_addr)) {
1548 1971 DEBUGOUT("MAC address is multicast\n");
1549 1972 status = IXGBE_ERR_INVALID_MAC_ADDR;
1550 1973 /* Not a broadcast address */
1551 1974 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
1552 1975 DEBUGOUT("MAC address is broadcast\n");
1553 1976 status = IXGBE_ERR_INVALID_MAC_ADDR;
1554 1977 /* Reject the zero address */
1555 1978 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1556 - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1979 + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
1557 1980 DEBUGOUT("MAC address is all zeros\n");
1558 1981 status = IXGBE_ERR_INVALID_MAC_ADDR;
1559 1982 }
1560 1983 return status;
1561 1984 }
1562 1985
1563 1986 /**
1564 1987 * ixgbe_set_rar_generic - Set Rx address register
1565 1988 * @hw: pointer to hardware structure
1566 1989 * @index: Receive address register to write
1567 1990 * @addr: Address to put into receive address register
1568 1991 * @vmdq: VMDq "set" or "pool" index
1569 1992 * @enable_addr: set flag that address is active
1570 1993 *
1571 1994 * Puts an ethernet address into a receive address register.
1572 1995 **/
1573 1996 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1574 - u32 enable_addr)
1997 + u32 enable_addr)
1575 1998 {
1576 1999 u32 rar_low, rar_high;
1577 2000 u32 rar_entries = hw->mac.num_rar_entries;
1578 2001
1579 2002 DEBUGFUNC("ixgbe_set_rar_generic");
1580 2003
1581 2004 /* Make sure we are using a valid rar index range */
1582 2005 if (index >= rar_entries) {
1583 2006 DEBUGOUT1("RAR index %d is out of range.\n", index);
1584 2007 return IXGBE_ERR_INVALID_ARGUMENT;
1585 2008 }
1586 2009
1587 2010 /* setup VMDq pool selection before this RAR gets enabled */
1588 2011 hw->mac.ops.set_vmdq(hw, index, vmdq);
1589 2012
1590 2013 /*
1591 2014 * HW expects these in little endian so we reverse the byte
1592 2015 * order from network order (big endian) to little endian
1593 2016 */
1594 2017 rar_low = ((u32)addr[0] |
1595 - ((u32)addr[1] << 8) |
1596 - ((u32)addr[2] << 16) |
1597 - ((u32)addr[3] << 24));
2018 + ((u32)addr[1] << 8) |
2019 + ((u32)addr[2] << 16) |
2020 + ((u32)addr[3] << 24));
1598 2021 /*
1599 2022 * Some parts put the VMDq setting in the extra RAH bits,
1600 2023 * so save everything except the lower 16 bits that hold part
1601 2024 * of the address and the address valid bit.
1602 2025 */
1603 2026 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1604 2027 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1605 2028 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1606 2029
1607 2030 if (enable_addr != 0)
1608 2031 rar_high |= IXGBE_RAH_AV;
1609 2032
1610 2033 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1611 2034 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1612 2035
1613 2036 return IXGBE_SUCCESS;
1614 2037 }
1615 2038
1616 2039 /**
1617 2040 * ixgbe_clear_rar_generic - Remove Rx address register
1618 2041 * @hw: pointer to hardware structure
1619 2042 * @index: Receive address register to write
1620 2043 *
1621 2044 * Clears an ethernet address from a receive address register.
1622 2045 **/
1623 2046 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1624 2047 {
1625 2048 u32 rar_high;
1626 2049 u32 rar_entries = hw->mac.num_rar_entries;
1627 2050
1628 2051 DEBUGFUNC("ixgbe_clear_rar_generic");
1629 2052
1630 2053 /* Make sure we are using a valid rar index range */
1631 2054 if (index >= rar_entries) {
1632 2055 DEBUGOUT1("RAR index %d is out of range.\n", index);
1633 2056 return IXGBE_ERR_INVALID_ARGUMENT;
1634 2057 }
1635 2058
1636 2059 /*
1637 2060 * Some parts put the VMDq setting in the extra RAH bits,
1638 2061 * so save everything except the lower 16 bits that hold part
1639 2062 * of the address and the address valid bit.
1640 2063 */
1641 2064 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1642 2065 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1643 2066
1644 2067 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1645 2068 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1646 2069
1647 2070 /* clear VMDq pool/queue selection for this RAR */
1648 2071 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1649 2072
1650 2073 return IXGBE_SUCCESS;
1651 2074 }
1652 2075
1653 2076 /**
1654 2077 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1655 2078 * @hw: pointer to hardware structure
1656 2079 *
1657 2080 * Places the MAC address in receive address register 0 and clears the rest
1658 2081 * of the receive address registers. Clears the multicast table. Assumes
1659 2082 * the receiver is in reset when the routine is called.
1660 2083 **/
1661 2084 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1662 2085 {
1663 2086 u32 i;
1664 2087 u32 rar_entries = hw->mac.num_rar_entries;
1665 2088
1666 2089 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
1667 2090
1668 2091 /*
|
↓ open down ↓ |
61 lines elided |
↑ open up ↑ |
1669 2092 * If the current mac address is valid, assume it is a software override
1670 2093 * to the permanent address.
1671 2094 * Otherwise, use the permanent address from the eeprom.
1672 2095 */
1673 2096 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
1674 2097 IXGBE_ERR_INVALID_MAC_ADDR) {
1675 2098 /* Get the MAC address from the RAR0 for later reference */
1676 2099 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1677 2100
1678 2101 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
1679 - hw->mac.addr[0], hw->mac.addr[1],
1680 - hw->mac.addr[2]);
2102 + hw->mac.addr[0], hw->mac.addr[1],
2103 + hw->mac.addr[2]);
1681 2104 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1682 - hw->mac.addr[4], hw->mac.addr[5]);
2105 + hw->mac.addr[4], hw->mac.addr[5]);
1683 2106 } else {
1684 2107 /* Setup the receive address. */
1685 2108 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
1686 2109 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
1687 - hw->mac.addr[0], hw->mac.addr[1],
1688 - hw->mac.addr[2]);
2110 + hw->mac.addr[0], hw->mac.addr[1],
2111 + hw->mac.addr[2]);
1689 2112 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
1690 - hw->mac.addr[4], hw->mac.addr[5]);
2113 + hw->mac.addr[4], hw->mac.addr[5]);
1691 2114
1692 2115 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1693 2116
1694 2117 /* clear VMDq pool/queue selection for RAR 0 */
1695 2118 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1696 2119 }
1697 2120 hw->addr_ctrl.overflow_promisc = 0;
1698 2121
1699 2122 hw->addr_ctrl.rar_used_count = 1;
1700 2123
1701 2124 /* Zero out the other receive addresses. */
1702 2125 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
1703 2126 for (i = 1; i < rar_entries; i++) {
1704 2127 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1705 2128 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
1706 2129 }
1707 2130
1708 2131 /* Clear the MTA */
1709 2132 hw->addr_ctrl.mta_in_use = 0;
1710 2133 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1711 2134
1712 2135 DEBUGOUT(" Clearing MTA\n");
1713 2136 for (i = 0; i < hw->mac.mcft_size; i++)
1714 2137 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1715 2138
1716 - (void) ixgbe_init_uta_tables(hw);
2139 + ixgbe_init_uta_tables(hw);
1717 2140
1718 2141 return IXGBE_SUCCESS;
1719 2142 }
1720 2143
1721 2144 /**
1722 2145 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1723 2146 * @hw: pointer to hardware structure
1724 2147 * @addr: new address
1725 2148 *
1726 2149 * Adds it to unused receive address register or goes into promiscuous mode.
1727 2150 **/
1728 2151 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1729 2152 {
1730 2153 u32 rar_entries = hw->mac.num_rar_entries;
1731 2154 u32 rar;
1732 2155
1733 2156 DEBUGFUNC("ixgbe_add_uc_addr");
1734 2157
1735 2158 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1736 - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2159 + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1737 2160
1738 2161 /*
1739 2162 * Place this address in the RAR if there is room,
1740 2163 * else put the controller into promiscuous mode
1741 2164 */
1742 2165 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1743 2166 rar = hw->addr_ctrl.rar_used_count;
1744 2167 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1745 2168 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
1746 2169 hw->addr_ctrl.rar_used_count++;
1747 2170 } else {
1748 2171 hw->addr_ctrl.overflow_promisc++;
1749 2172 }
1750 2173
1751 2174 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
1752 2175 }
1753 2176
1754 2177 /**
1755 2178 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1756 2179 * @hw: pointer to hardware structure
1757 2180 * @addr_list: the list of new addresses
1758 2181 * @addr_count: number of addresses
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
1759 2182 * @next: iterator function to walk the address list
1760 2183 *
1761 2184 * The given list replaces any existing list. Clears the secondary addrs from
1762 2185 * receive address registers. Uses unused receive address registers for the
1763 2186 * first secondary addresses, and falls back to promiscuous mode as needed.
1764 2187 *
1765 2188 * Drivers using secondary unicast addresses must set user_set_promisc when
1766 2189 * manually putting the device into promiscuous mode.
1767 2190 **/
1768 2191 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1769 - u32 addr_count, ixgbe_mc_addr_itr next)
2192 + u32 addr_count, ixgbe_mc_addr_itr next)
1770 2193 {
1771 2194 u8 *addr;
1772 2195 u32 i;
1773 2196 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1774 2197 u32 uc_addr_in_use;
1775 2198 u32 fctrl;
1776 2199 u32 vmdq;
1777 2200
1778 2201 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
1779 2202
1780 2203 /*
1781 2204 * Clear accounting of old secondary address list,
1782 2205 * don't count RAR[0]
1783 2206 */
1784 2207 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1785 2208 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1786 2209 hw->addr_ctrl.overflow_promisc = 0;
1787 2210
1788 2211 /* Zero out the other receive addresses */
1789 2212 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
1790 2213 for (i = 0; i < uc_addr_in_use; i++) {
1791 2214 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1792 2215 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1793 2216 }
1794 2217
1795 2218 /* Add the new addresses */
1796 2219 for (i = 0; i < addr_count; i++) {
1797 2220 DEBUGOUT(" Adding the secondary addresses:\n");
1798 2221 addr = next(hw, &addr_list, &vmdq);
1799 2222 ixgbe_add_uc_addr(hw, addr, vmdq);
1800 2223 }
1801 2224
1802 2225 if (hw->addr_ctrl.overflow_promisc) {
1803 2226 /* enable promisc if not already in overflow or set by user */
1804 2227 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1805 2228 DEBUGOUT(" Entering address overflow promisc mode\n");
1806 2229 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1807 2230 fctrl |= IXGBE_FCTRL_UPE;
1808 2231 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1809 2232 }
1810 2233 } else {
1811 2234 /* only disable if set by overflow, not by user */
1812 2235 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1813 2236 DEBUGOUT(" Leaving address overflow promisc mode\n");
1814 2237 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1815 2238 fctrl &= ~IXGBE_FCTRL_UPE;
1816 2239 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1817 2240 }
1818 2241 }
1819 2242
1820 2243 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
1821 2244 return IXGBE_SUCCESS;
1822 2245 }
1823 2246
1824 2247 /**
1825 2248 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1826 2249 * @hw: pointer to hardware structure
1827 2250 * @mc_addr: the multicast address
1828 2251 *
1829 2252 * Extracts the 12 bits, from a multicast address, to determine which
1830 2253 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
1831 2254 * incoming rx multicast addresses, to determine the bit-vector to check in
1832 2255 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
1833 2256 * by the MO field of the MCSTCTRL. The MO field is set during initialization
1834 2257 * to mc_filter_type.
1835 2258 **/
1836 2259 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1837 2260 {
1838 2261 u32 vector = 0;
1839 2262
1840 2263 DEBUGFUNC("ixgbe_mta_vector");
1841 2264
1842 2265 switch (hw->mac.mc_filter_type) {
1843 2266 case 0: /* use bits [47:36] of the address */
1844 2267 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1845 2268 break;
1846 2269 case 1: /* use bits [46:35] of the address */
1847 2270 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1848 2271 break;
1849 2272 case 2: /* use bits [45:34] of the address */
1850 2273 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1851 2274 break;
1852 2275 case 3: /* use bits [43:32] of the address */
1853 2276 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1854 2277 break;
1855 2278 default: /* Invalid mc_filter_type */
1856 2279 DEBUGOUT("MC filter type param set incorrectly\n");
1857 2280 ASSERT(0);
1858 2281 break;
1859 2282 }
1860 2283
1861 2284 /* vector can only be 12-bits or boundary will be exceeded */
1862 2285 vector &= 0xFFF;
1863 2286 return vector;
1864 2287 }
1865 2288
1866 2289 /**
1867 2290 * ixgbe_set_mta - Set bit-vector in multicast table
1868 2291 * @hw: pointer to hardware structure
1869 2292 * @hash_value: Multicast address hash value
1870 2293 *
1871 2294 * Sets the bit-vector in the multicast table.
1872 2295 **/
1873 2296 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1874 2297 {
1875 2298 u32 vector;
1876 2299 u32 vector_bit;
1877 2300 u32 vector_reg;
1878 2301
1879 2302 DEBUGFUNC("ixgbe_set_mta");
1880 2303
1881 2304 hw->addr_ctrl.mta_in_use++;
1882 2305
1883 2306 vector = ixgbe_mta_vector(hw, mc_addr);
1884 2307 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
1885 2308
1886 2309 /*
1887 2310 * The MTA is a register array of 128 32-bit registers. It is treated
1888 2311 * like an array of 4096 bits. We want to set bit
1889 2312 * BitArray[vector_value]. So we figure out what register the bit is
1890 2313 * in, read it, OR in the new bit, then write back the new value. The
1891 2314 * register is determined by the upper 7 bits of the vector value and
1892 2315 * the bit within that register are determined by the lower 5 bits of
1893 2316 * the value.
1894 2317 */
1895 2318 vector_reg = (vector >> 5) & 0x7F;
|
↓ open down ↓ |
116 lines elided |
↑ open up ↑ |
1896 2319 vector_bit = vector & 0x1F;
1897 2320 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1898 2321 }
1899 2322
1900 2323 /**
1901 2324 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1902 2325 * @hw: pointer to hardware structure
1903 2326 * @mc_addr_list: the list of new multicast addresses
1904 2327 * @mc_addr_count: number of addresses
1905 2328 * @next: iterator function to walk the multicast address list
2329 + * @clear: flag, when set clears the table beforehand
1906 2330 *
1907 - * The given list replaces any existing list. Clears the MC addrs from receive
1908 - * address registers and the multicast table. Uses unused receive address
1909 - * registers for the first multicast addresses, and hashes the rest into the
1910 - * multicast table.
2331 + * When the clear flag is set, the given list replaces any existing list.
2332 + * Hashes the given addresses into the multicast table.
1911 2333 **/
1912 2334 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
1913 - u32 mc_addr_count, ixgbe_mc_addr_itr next)
2335 + u32 mc_addr_count, ixgbe_mc_addr_itr next,
2336 + bool clear)
1914 2337 {
1915 2338 u32 i;
1916 2339 u32 vmdq;
1917 2340
1918 2341 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
1919 2342
1920 2343 /*
1921 2344 * Set the new number of MC addresses that we are being requested to
1922 2345 * use.
1923 2346 */
1924 2347 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
1925 2348 hw->addr_ctrl.mta_in_use = 0;
1926 2349
1927 2350 /* Clear mta_shadow */
1928 - DEBUGOUT(" Clearing MTA\n");
1929 - (void) memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2351 + if (clear) {
2352 + DEBUGOUT(" Clearing MTA\n");
2353 + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2354 + }
1930 2355
1931 2356 /* Update mta_shadow */
1932 2357 for (i = 0; i < mc_addr_count; i++) {
1933 2358 DEBUGOUT(" Adding the multicast addresses:\n");
1934 2359 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
1935 2360 }
1936 2361
1937 2362 /* Enable mta */
1938 2363 for (i = 0; i < hw->mac.mcft_size; i++)
1939 2364 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1940 2365 hw->mac.mta_shadow[i]);
1941 2366
1942 2367 if (hw->addr_ctrl.mta_in_use > 0)
1943 2368 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1944 - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2369 + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
1945 2370
1946 2371 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
1947 2372 return IXGBE_SUCCESS;
1948 2373 }
1949 2374
1950 2375 /**
1951 2376 * ixgbe_enable_mc_generic - Enable multicast address in RAR
1952 2377 * @hw: pointer to hardware structure
1953 2378 *
1954 2379 * Enables multicast address in RAR and the use of the multicast hash table.
1955 2380 **/
1956 2381 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1957 2382 {
1958 2383 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1959 2384
1960 2385 DEBUGFUNC("ixgbe_enable_mc_generic");
1961 2386
1962 2387 if (a->mta_in_use > 0)
1963 2388 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1964 - hw->mac.mc_filter_type);
2389 + hw->mac.mc_filter_type);
1965 2390
1966 2391 return IXGBE_SUCCESS;
1967 2392 }
1968 2393
1969 2394 /**
1970 2395 * ixgbe_disable_mc_generic - Disable multicast address in RAR
1971 2396 * @hw: pointer to hardware structure
1972 2397 *
1973 2398 * Disables multicast address in RAR and the use of the multicast hash table.
1974 2399 **/
1975 2400 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1976 2401 {
1977 2402 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1978 2403
1979 2404 DEBUGFUNC("ixgbe_disable_mc_generic");
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
1980 2405
1981 2406 if (a->mta_in_use > 0)
1982 2407 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1983 2408
1984 2409 return IXGBE_SUCCESS;
1985 2410 }
1986 2411
1987 2412 /**
1988 2413 * ixgbe_fc_enable_generic - Enable flow control
1989 2414 * @hw: pointer to hardware structure
1990 - * @packetbuf_num: packet buffer number (0-7)
1991 2415 *
1992 2416 * Enable flow control according to the current settings.
1993 2417 **/
1994 -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
2418 +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
1995 2419 {
1996 2420 s32 ret_val = IXGBE_SUCCESS;
1997 2421 u32 mflcn_reg, fccfg_reg;
1998 2422 u32 reg;
1999 - u32 rx_pba_size;
2000 2423 u32 fcrtl, fcrth;
2424 + int i;
2001 2425
2002 2426 DEBUGFUNC("ixgbe_fc_enable_generic");
2003 2427
2004 - /* Negotiate the fc mode to use */
2005 - ret_val = ixgbe_fc_autoneg(hw);
2006 - if (ret_val == IXGBE_ERR_FLOW_CONTROL)
2428 + /* Validate the water mark configuration */
2429 + if (!hw->fc.pause_time) {
2430 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2007 2431 goto out;
2432 + }
2008 2433
2434 + /* Low water mark of zero causes XOFF floods */
2435 + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2436 + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2437 + hw->fc.high_water[i]) {
2438 + if (!hw->fc.low_water[i] ||
2439 + hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2440 + DEBUGOUT("Invalid water mark configuration\n");
2441 + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2442 + goto out;
2443 + }
2444 + }
2445 + }
2446 +
2447 + /* Negotiate the fc mode to use */
2448 + ixgbe_fc_autoneg(hw);
2449 +
2009 2450 /* Disable any previous flow control settings */
2010 2451 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2011 - mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
2452 + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2012 2453
2013 2454 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2014 2455 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2015 2456
2016 2457 /*
2017 2458 * The possible values of fc.current_mode are:
2018 2459 * 0: Flow control is completely disabled
2019 2460 * 1: Rx flow control is enabled (we can receive pause frames,
2020 2461 * but not send pause frames).
2021 2462 * 2: Tx flow control is enabled (we can send pause frames but
2022 2463 * we do not support receiving pause frames).
2023 2464 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2024 2465 * other: Invalid.
2025 2466 */
2026 2467 switch (hw->fc.current_mode) {
2027 2468 case ixgbe_fc_none:
2028 2469 /*
2029 2470 * Flow control is disabled by software override or autoneg.
2030 2471 * The code below will actually disable it in the HW.
2031 2472 */
2032 2473 break;
2033 2474 case ixgbe_fc_rx_pause:
2034 2475 /*
2035 2476 * Rx Flow control is enabled and Tx Flow control is
2036 2477 * disabled by software override. Since there really
2037 2478 * isn't a way to advertise that we are capable of RX
2038 2479 * Pause ONLY, we will advertise that we support both
2039 2480 * symmetric and asymmetric Rx PAUSE. Later, we will
2040 2481 * disable the adapter's ability to send PAUSE frames.
2041 2482 */
2042 2483 mflcn_reg |= IXGBE_MFLCN_RFCE;
2043 2484 break;
2044 2485 case ixgbe_fc_tx_pause:
2045 2486 /*
2046 2487 * Tx Flow control is enabled, and Rx Flow control is
2047 2488 * disabled by software override.
2048 2489 */
2049 2490 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2050 2491 break;
2051 2492 case ixgbe_fc_full:
2052 2493 /* Flow control (both Rx and Tx) is enabled by SW override. */
2053 2494 mflcn_reg |= IXGBE_MFLCN_RFCE;
2054 2495 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2055 2496 break;
2056 2497 default:
|
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
2057 2498 DEBUGOUT("Flow control param set incorrectly\n");
2058 2499 ret_val = IXGBE_ERR_CONFIG;
2059 2500 goto out;
2060 2501 }
2061 2502
2062 2503 /* Set 802.3x based flow control settings. */
2063 2504 mflcn_reg |= IXGBE_MFLCN_DPF;
2064 2505 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2065 2506 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2066 2507
2067 - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
2068 - rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
2069 2508
2070 - fcrth = (rx_pba_size - hw->fc.high_water) << 10;
2071 - fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
2509 + /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2510 + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2511 + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2512 + hw->fc.high_water[i]) {
2513 + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2514 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2515 + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2516 + } else {
2517 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2518 + /*
2519 + * In order to prevent Tx hangs when the internal Tx
2520 + * switch is enabled we must set the high water mark
2521 + * to the maximum FCRTH value. This allows the Tx
2522 + * switch to function even under heavy Rx workloads.
2523 + */
2524 + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2525 + }
2072 2526
2073 - if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
2074 - fcrth |= IXGBE_FCRTH_FCEN;
2075 - if (hw->fc.send_xon)
2076 - fcrtl |= IXGBE_FCRTL_XONE;
2527 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2077 2528 }
2078 2529
2079 - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
2080 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
2081 -
2082 2530 /* Configure pause time (2 TCs per register) */
2083 - reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
2084 - if ((packetbuf_num & 1) == 0)
2085 - reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
2086 - else
2087 - reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
2088 - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
2531 + reg = hw->fc.pause_time * 0x00010001;
2532 + for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2533 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2089 2534
2090 - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
2535 + /* Configure flow control refresh threshold value */
2536 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2091 2537
2092 2538 out:
2093 2539 return ret_val;
2094 2540 }
2095 2541
2096 2542 /**
2097 - * ixgbe_fc_autoneg - Configure flow control
2543 + * ixgbe_negotiate_fc - Negotiate flow control
2098 2544 * @hw: pointer to hardware structure
2545 + * @adv_reg: flow control advertised settings
2546 + * @lp_reg: link partner's flow control settings
2547 + * @adv_sym: symmetric pause bit in advertisement
2548 + * @adv_asm: asymmetric pause bit in advertisement
2549 + * @lp_sym: symmetric pause bit in link partner advertisement
2550 + * @lp_asm: asymmetric pause bit in link partner advertisement
2099 2551 *
2100 - * Compares our advertised flow control capabilities to those advertised by
2101 - * our link partner, and determines the proper flow control mode to use.
2552 + * Find the intersection between advertised settings and link partner's
2553 + * advertised settings
2102 2554 **/
2103 -s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2555 +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2556 + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2104 2557 {
2105 - s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2106 - ixgbe_link_speed speed;
2107 - bool link_up;
2558 + if ((!(adv_reg)) || (!(lp_reg)))
2559 + return IXGBE_ERR_FC_NOT_NEGOTIATED;
2108 2560
2109 - DEBUGFUNC("ixgbe_fc_autoneg");
2110 -
2111 - if (hw->fc.disable_fc_autoneg)
2112 - goto out;
2113 -
2114 - /*
2115 - * AN should have completed when the cable was plugged in.
2116 - * Look for reasons to bail out. Bail out if:
2117 - * - FC autoneg is disabled, or if
2118 - * - link is not up.
2119 - *
2120 - * Since we're being called from an LSC, link is already known to be up.
2121 - * So use link_up_wait_to_complete=FALSE.
2122 - */
2123 - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2124 - if (!link_up) {
2125 - ret_val = IXGBE_ERR_FLOW_CONTROL;
2126 - goto out;
2127 - }
2128 -
2129 - switch (hw->phy.media_type) {
2130 - /* Autoneg flow control on fiber adapters */
2131 - case ixgbe_media_type_fiber:
2132 - if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2133 - ret_val = ixgbe_fc_autoneg_fiber(hw);
2134 - break;
2135 -
2136 - /* Autoneg flow control on backplane adapters */
2137 - case ixgbe_media_type_backplane:
2138 - ret_val = ixgbe_fc_autoneg_backplane(hw);
2139 - break;
2140 -
2141 - /* Autoneg flow control on copper adapters */
2142 - case ixgbe_media_type_copper:
2143 - if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2144 - ret_val = ixgbe_fc_autoneg_copper(hw);
2145 - break;
2146 -
2147 - default:
2148 - break;
2149 - }
2150 -
2151 -out:
2152 - if (ret_val == IXGBE_SUCCESS) {
2153 - hw->fc.fc_was_autonegged = TRUE;
2561 + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2562 + /*
2563 + * Now we need to check if the user selected Rx ONLY
2564 + * of pause frames. In this case, we had to advertise
2565 + * FULL flow control because we could not advertise RX
2566 + * ONLY. Hence, we must now check to see if we need to
2567 + * turn OFF the TRANSMISSION of PAUSE frames.
2568 + */
2569 + if (hw->fc.requested_mode == ixgbe_fc_full) {
2570 + hw->fc.current_mode = ixgbe_fc_full;
2571 + DEBUGOUT("Flow Control = FULL.\n");
2572 + } else {
2573 + hw->fc.current_mode = ixgbe_fc_rx_pause;
2574 + DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2575 + }
2576 + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2577 + (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2578 + hw->fc.current_mode = ixgbe_fc_tx_pause;
2579 + DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2580 + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2581 + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2582 + hw->fc.current_mode = ixgbe_fc_rx_pause;
2583 + DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2154 2584 } else {
2155 - hw->fc.fc_was_autonegged = FALSE;
2156 - hw->fc.current_mode = hw->fc.requested_mode;
2585 + hw->fc.current_mode = ixgbe_fc_none;
2586 + DEBUGOUT("Flow Control = NONE.\n");
2157 2587 }
2158 - return ret_val;
2588 + return IXGBE_SUCCESS;
2159 2589 }
2160 2590
2161 2591 /**
2162 2592 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2163 2593 * @hw: pointer to hardware structure
2164 - * @speed:
2165 - * @link_up
2166 2594 *
2167 2595 * Enable flow control according on 1 gig fiber.
2168 2596 **/
2169 2597 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2170 2598 {
2171 2599 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2172 - s32 ret_val;
2600 + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2173 2601
2174 2602 /*
2175 2603 * On multispeed fiber at 1g, bail out if
2176 2604 * - link is up but AN did not complete, or if
2177 2605 * - link is up and AN completed but timed out
2178 2606 */
2179 2607
2180 2608 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2181 - if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2182 - ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2183 - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2609 + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2610 + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2184 2611 goto out;
2185 - }
2186 2612
2187 2613 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2188 2614 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2189 2615
2190 2616 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2191 - pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2192 - IXGBE_PCS1GANA_ASM_PAUSE,
2193 - IXGBE_PCS1GANA_SYM_PAUSE,
2194 - IXGBE_PCS1GANA_ASM_PAUSE);
2617 + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2618 + IXGBE_PCS1GANA_ASM_PAUSE,
2619 + IXGBE_PCS1GANA_SYM_PAUSE,
2620 + IXGBE_PCS1GANA_ASM_PAUSE);
2195 2621
2196 2622 out:
2197 2623 return ret_val;
2198 2624 }
2199 2625
2200 2626 /**
2201 2627 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2202 2628 * @hw: pointer to hardware structure
2203 2629 *
2204 2630 * Enable flow control according to IEEE clause 37.
2205 2631 **/
2206 2632 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2207 2633 {
2208 2634 u32 links2, anlp1_reg, autoc_reg, links;
2209 - s32 ret_val;
2635 + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2210 2636
2211 2637 /*
2212 2638 * On backplane, bail out if
2213 2639 * - backplane autoneg was not completed, or if
2214 2640 * - we are 82599 and link partner is not AN enabled
2215 2641 */
2216 2642 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2217 - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2218 - hw->fc.fc_was_autonegged = FALSE;
2219 - hw->fc.current_mode = hw->fc.requested_mode;
2220 - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2643 + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2221 2644 goto out;
2222 - }
2223 2645
2224 2646 if (hw->mac.type == ixgbe_mac_82599EB) {
2225 2647 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2226 - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2227 - hw->fc.fc_was_autonegged = FALSE;
2228 - hw->fc.current_mode = hw->fc.requested_mode;
2229 - ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2648 + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2230 2649 goto out;
2231 - }
2232 2650 }
2233 2651 /*
2234 2652 * Read the 10g AN autoc and LP ability registers and resolve
2235 2653 * local flow control settings accordingly
2236 2654 */
2237 2655 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2238 2656 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2239 2657
2240 2658 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2241 2659 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2242 2660 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2243 2661
2244 2662 out:
2245 2663 return ret_val;
2246 2664 }
2247 2665
2248 2666 /**
2249 2667 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2250 2668 * @hw: pointer to hardware structure
2251 2669 *
2252 2670 * Enable flow control according to IEEE clause 37.
2253 2671 **/
2254 2672 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2255 2673 {
2256 2674 u16 technology_ability_reg = 0;
2257 2675 u16 lp_technology_ability_reg = 0;
2258 2676
2259 2677 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2260 2678 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2261 2679 &technology_ability_reg);
2262 2680 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
2263 2681 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2264 2682 &lp_technology_ability_reg);
2265 2683
2266 2684 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2267 2685 (u32)lp_technology_ability_reg,
2268 2686 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2269 2687 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2270 2688 }
2271 2689
2272 2690 /**
2273 - * ixgbe_negotiate_fc - Negotiate flow control
2691 + * ixgbe_fc_autoneg - Configure flow control
2274 2692 * @hw: pointer to hardware structure
2275 - * @adv_reg: flow control advertised settings
2276 - * @lp_reg: link partner's flow control settings
2277 - * @adv_sym: symmetric pause bit in advertisement
2278 - * @adv_asm: asymmetric pause bit in advertisement
2279 - * @lp_sym: symmetric pause bit in link partner advertisement
2280 - * @lp_asm: asymmetric pause bit in link partner advertisement
2281 2693 *
2282 - * Find the intersection between advertised settings and link partner's
2283 - * advertised settings
2694 + * Compares our advertised flow control capabilities to those advertised by
2695 + * our link partner, and determines the proper flow control mode to use.
2284 2696 **/
2285 -static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2286 - u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2697 +void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2287 2698 {
2288 - if ((!(adv_reg)) || (!(lp_reg)))
2289 - return IXGBE_ERR_FC_NOT_NEGOTIATED;
2699 + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2700 + ixgbe_link_speed speed;
2701 + bool link_up;
2290 2702
2291 - if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2292 - /*
2293 - * Now we need to check if the user selected Rx ONLY
2294 - * of pause frames. In this case, we had to advertise
2295 - * FULL flow control because we could not advertise RX
2296 - * ONLY. Hence, we must now check to see if we need to
2297 - * turn OFF the TRANSMISSION of PAUSE frames.
2298 - */
2299 - if (hw->fc.requested_mode == ixgbe_fc_full) {
2300 - hw->fc.current_mode = ixgbe_fc_full;
2301 - DEBUGOUT("Flow Control = FULL.\n");
2302 - } else {
2303 - hw->fc.current_mode = ixgbe_fc_rx_pause;
2304 - DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2305 - }
2306 - } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2307 - (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2308 - hw->fc.current_mode = ixgbe_fc_tx_pause;
2309 - DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2310 - } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2311 - !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2312 - hw->fc.current_mode = ixgbe_fc_rx_pause;
2313 - DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2314 - } else {
2315 - hw->fc.current_mode = ixgbe_fc_none;
2316 - DEBUGOUT("Flow Control = NONE.\n");
2317 - }
2318 - return IXGBE_SUCCESS;
2319 -}
2703 + DEBUGFUNC("ixgbe_fc_autoneg");
2320 2704
2321 -/**
2322 - * ixgbe_setup_fc - Set up flow control
2323 - * @hw: pointer to hardware structure
2324 - *
2325 - * Called at init time to set up flow control.
2326 - **/
2327 -s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
2328 -{
2329 - s32 ret_val = IXGBE_SUCCESS;
2330 - u32 reg = 0, reg_bp = 0;
2331 - u16 reg_cu = 0;
2332 -
2333 - DEBUGFUNC("ixgbe_setup_fc");
2334 -
2335 - /* Validate the packetbuf configuration */
2336 - if (packetbuf_num < 0 || packetbuf_num > 7) {
2337 - DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
2338 - " 0-7\n", packetbuf_num);
2339 - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2340 - goto out;
2341 - }
2342 -
2343 2705 /*
2344 - * Validate the water mark configuration. Zero water marks are invalid
2345 - * because it causes the controller to just blast out fc packets.
2706 + * AN should have completed when the cable was plugged in.
2707 + * Look for reasons to bail out. Bail out if:
2708 + * - FC autoneg is disabled, or if
2709 + * - link is not up.
2346 2710 */
2347 - if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
2348 - DEBUGOUT("Invalid water mark configuration\n");
2349 - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2711 + if (hw->fc.disable_fc_autoneg)
2350 2712 goto out;
2351 - }
2352 2713
2353 - /*
2354 - * Validate the requested mode. Strict IEEE mode does not allow
2355 - * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2356 - */
2357 - if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2358 - DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2359 - ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2714 + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2715 + if (!link_up)
2360 2716 goto out;
2361 - }
2362 2717
2363 - /*
2364 - * 10gig parts do not have a word in the EEPROM to determine the
2365 - * default flow control setting, so we explicitly set it to full.
2366 - */
2367 - if (hw->fc.requested_mode == ixgbe_fc_default)
2368 - hw->fc.requested_mode = ixgbe_fc_full;
2369 -
2370 - /*
2371 - * Set up the 1G and 10G flow control advertisement registers so the
2372 - * HW will be able to do fc autoneg once the cable is plugged in. If
2373 - * we link at 10G, the 1G advertisement is harmless and vice versa.
2374 - */
2375 -
2376 2718 switch (hw->phy.media_type) {
2719 + /* Autoneg flow control on fiber adapters */
2377 2720 case ixgbe_media_type_fiber:
2721 + if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2722 + ret_val = ixgbe_fc_autoneg_fiber(hw);
2723 + break;
2724 +
2725 + /* Autoneg flow control on backplane adapters */
2378 2726 case ixgbe_media_type_backplane:
2379 - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2380 - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2727 + ret_val = ixgbe_fc_autoneg_backplane(hw);
2381 2728 break;
2382 2729
2730 + /* Autoneg flow control on copper adapters */
2383 2731 case ixgbe_media_type_copper:
2384 - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2385 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
2732 + if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2733 + ret_val = ixgbe_fc_autoneg_copper(hw);
2386 2734 break;
2387 2735
2388 2736 default:
2389 - ;
2390 - }
2391 -
2392 - /*
2393 - * The possible values of fc.requested_mode are:
2394 - * 0: Flow control is completely disabled
2395 - * 1: Rx flow control is enabled (we can receive pause frames,
2396 - * but not send pause frames).
2397 - * 2: Tx flow control is enabled (we can send pause frames but
2398 - * we do not support receiving pause frames).
2399 - * 3: Both Rx and Tx flow control (symmetric) are enabled.
2400 - * other: Invalid.
2401 - */
2402 - switch (hw->fc.requested_mode) {
2403 - case ixgbe_fc_none:
2404 - /* Flow control completely disabled by software override. */
2405 - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2406 - if (hw->phy.media_type == ixgbe_media_type_backplane)
2407 - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2408 - IXGBE_AUTOC_ASM_PAUSE);
2409 - else if (hw->phy.media_type == ixgbe_media_type_copper)
2410 - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2411 2737 break;
2412 - case ixgbe_fc_rx_pause:
2413 - /*
2414 - * Rx Flow control is enabled and Tx Flow control is
2415 - * disabled by software override. Since there really
2416 - * isn't a way to advertise that we are capable of RX
2417 - * Pause ONLY, we will advertise that we support both
2418 - * symmetric and asymmetric Rx PAUSE. Later, we will
2419 - * disable the adapter's ability to send PAUSE frames.
2420 - */
2421 - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2422 - if (hw->phy.media_type == ixgbe_media_type_backplane)
2423 - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2424 - IXGBE_AUTOC_ASM_PAUSE);
2425 - else if (hw->phy.media_type == ixgbe_media_type_copper)
2426 - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2427 - break;
2428 - case ixgbe_fc_tx_pause:
2429 - /*
2430 - * Tx Flow control is enabled, and Rx Flow control is
2431 - * disabled by software override.
2432 - */
2433 - reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2434 - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2435 - if (hw->phy.media_type == ixgbe_media_type_backplane) {
2436 - reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2437 - reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2438 - } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2439 - reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2440 - reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2441 - }
2442 - break;
2443 - case ixgbe_fc_full:
2444 - /* Flow control (both Rx and Tx) is enabled by SW override. */
2445 - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2446 - if (hw->phy.media_type == ixgbe_media_type_backplane)
2447 - reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2448 - IXGBE_AUTOC_ASM_PAUSE);
2449 - else if (hw->phy.media_type == ixgbe_media_type_copper)
2450 - reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2451 - break;
2452 - default:
2453 - DEBUGOUT("Flow control param set incorrectly\n");
2454 - ret_val = IXGBE_ERR_CONFIG;
2455 - goto out;
2456 2738 }
2457 2739
2458 - /*
2459 - * Enable auto-negotiation between the MAC & PHY;
2460 - * the MAC will advertise clause 37 flow control.
2461 - */
2462 - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2463 - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2464 -
2465 - /* Disable AN timeout */
2466 - if (hw->fc.strict_ieee)
2467 - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2468 -
2469 - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2470 - DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2471 -
2472 - /*
2473 - * AUTOC restart handles negotiation of 1G and 10G on backplane
2474 - * and copper. There is no need to set the PCS1GCTL register.
2475 - *
2476 - */
2477 - if (hw->phy.media_type == ixgbe_media_type_backplane) {
2478 - reg_bp |= IXGBE_AUTOC_AN_RESTART;
2479 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2480 - } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2481 - (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
2482 - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2483 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
2484 - }
2485 -
2486 - DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2487 2740 out:
2488 - return ret_val;
2741 + if (ret_val == IXGBE_SUCCESS) {
2742 + hw->fc.fc_was_autonegged = TRUE;
2743 + } else {
2744 + hw->fc.fc_was_autonegged = FALSE;
2745 + hw->fc.current_mode = hw->fc.requested_mode;
2746 + }
2489 2747 }
2490 2748
2491 2749 /**
2492 2750 * ixgbe_disable_pcie_master - Disable PCI-express master access
2493 2751 * @hw: pointer to hardware structure
2494 2752 *
2495 2753 * Disables PCI-Express master access and verifies there are no pending
2496 2754 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2497 2755 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
2498 2756 * is returned signifying master requests disabled.
2499 2757 **/
2500 2758 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2501 2759 {
2502 - u32 i;
2503 - u32 reg_val;
2504 - u32 number_of_queues;
2505 2760 s32 status = IXGBE_SUCCESS;
2761 + u32 i;
2506 2762
2507 2763 DEBUGFUNC("ixgbe_disable_pcie_master");
2508 2764
2509 - /* Just jump out if bus mastering is already disabled */
2765 + /* Always set this bit to ensure any future transactions are blocked */
2766 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2767 +
 2768 + /* Exit if master requests are blocked */
2510 2769 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2511 2770 goto out;
2512 2771
2513 - /* Disable the receive unit by stopping each queue */
2514 - number_of_queues = hw->mac.max_rx_queues;
2515 - for (i = 0; i < number_of_queues; i++) {
2516 - reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
2517 - if (reg_val & IXGBE_RXDCTL_ENABLE) {
2518 - reg_val &= ~IXGBE_RXDCTL_ENABLE;
2519 - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
2520 - }
2521 - }
2522 -
2523 - reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
2524 - reg_val |= IXGBE_CTRL_GIO_DIS;
2525 - IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2526 -
2772 + /* Poll for master request bit to clear */
2527 2773 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2528 - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2529 - goto check_device_status;
2530 2774 usec_delay(100);
2775 + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2776 + goto out;
2531 2777 }
2532 2778
2779 + /*
2780 + * Two consecutive resets are required via CTRL.RST per datasheet
2781 + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2782 + * of this need. The first reset prevents new master requests from
2783 + * being issued by our device. We then must wait 1usec or more for any
2784 + * remaining completions from the PCIe bus to trickle in, and then reset
2785 + * again to clear out any effects they may have had on our device.
2786 + */
2533 2787 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
2534 - status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2788 + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2535 2789
2536 2790 /*
2537 2791 * Before proceeding, make sure that the PCIe block does not have
2538 2792 * transactions pending.
2539 2793 */
2540 -check_device_status:
2541 2794 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2542 - if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2543 - IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2544 - break;
2545 2795 usec_delay(100);
2796 + if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
2797 + IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2798 + goto out;
2546 2799 }
2547 2800
2548 - if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2549 - DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2550 - else
2551 - goto out;
2801 + DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
2802 + status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
2552 2803
2553 - /*
2554 - * Two consecutive resets are required via CTRL.RST per datasheet
2555 - * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2556 - * of this need. The first reset prevents new master requests from
2557 - * being issued by our device. We then must wait 1usec for any
2558 - * remaining completions from the PCIe bus to trickle in, and then reset
2559 - * again to clear out any effects they may have had on our device.
2560 - */
2561 - hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2562 -
2563 2804 out:
2564 2805 return status;
2565 2806 }
2566 2807
2567 -
2568 2808 /**
2569 2809 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2570 2810 * @hw: pointer to hardware structure
2571 2811 * @mask: Mask to specify which semaphore to acquire
2572 2812 *
2573 - * Acquires the SWFW semaphore thought the GSSR register for the specified
2813 + * Acquires the SWFW semaphore through the GSSR register for the specified
2574 2814 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2575 2815 **/
2576 2816 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2577 2817 {
2578 2818 u32 gssr;
2579 2819 u32 swmask = mask;
2580 2820 u32 fwmask = mask << 5;
2581 2821 s32 timeout = 200;
2582 2822
2583 2823 DEBUGFUNC("ixgbe_acquire_swfw_sync");
2584 2824
2585 2825 while (timeout) {
2586 2826 /*
2587 2827 * SW EEPROM semaphore bit is used for access to all
2588 2828 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2589 2829 */
2590 2830 if (ixgbe_get_eeprom_semaphore(hw))
2591 2831 return IXGBE_ERR_SWFW_SYNC;
2592 2832
2593 2833 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2594 2834 if (!(gssr & (fwmask | swmask)))
2595 2835 break;
2596 2836
2597 2837 /*
2598 2838 * Firmware currently using resource (fwmask) or other software
2599 2839 * thread currently using resource (swmask)
2600 2840 */
2601 2841 ixgbe_release_eeprom_semaphore(hw);
2602 2842 msec_delay(5);
2603 2843 timeout--;
2604 2844 }
2605 2845
2606 2846 if (!timeout) {
2607 2847 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
2608 2848 return IXGBE_ERR_SWFW_SYNC;
2609 2849 }
2610 2850
2611 2851 gssr |= swmask;
2612 2852 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
|
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
2613 2853
2614 2854 ixgbe_release_eeprom_semaphore(hw);
2615 2855 return IXGBE_SUCCESS;
2616 2856 }
2617 2857
2618 2858 /**
2619 2859 * ixgbe_release_swfw_sync - Release SWFW semaphore
2620 2860 * @hw: pointer to hardware structure
2621 2861 * @mask: Mask to specify which semaphore to release
2622 2862 *
2623 - * Releases the SWFW semaphore thought the GSSR register for the specified
2863 + * Releases the SWFW semaphore through the GSSR register for the specified
2624 2864 * function (CSR, PHY0, PHY1, EEPROM, Flash)
2625 2865 **/
2626 2866 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2627 2867 {
2628 2868 u32 gssr;
2629 2869 u32 swmask = mask;
2630 2870
2631 2871 DEBUGFUNC("ixgbe_release_swfw_sync");
2632 2872
2633 - (void) ixgbe_get_eeprom_semaphore(hw);
2873 + ixgbe_get_eeprom_semaphore(hw);
2634 2874
2635 2875 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2636 2876 gssr &= ~swmask;
2637 2877 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2638 2878
2639 2879 ixgbe_release_eeprom_semaphore(hw);
2640 2880 }
2641 2881
2642 2882 /**
2883 + * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
2884 + * @hw: pointer to hardware structure
2885 + *
2886 + * Stops the receive data path and waits for the HW to internally empty
2887 + * the Rx security block
2888 + **/
2889 +s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
2890 +{
2891 +#define IXGBE_MAX_SECRX_POLL 40
2892 +
2893 + int i;
2894 + int secrxreg;
2895 +
2896 + DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
2897 +
2898 +
2899 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2900 + secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2901 + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2902 + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2903 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2904 + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2905 + break;
2906 + else
2907 + /* Use interrupt-safe sleep just in case */
2908 + usec_delay(1000);
2909 + }
2910 +
2911 + /* For informational purposes only */
2912 + if (i >= IXGBE_MAX_SECRX_POLL)
2913 + DEBUGOUT("Rx unit being enabled before security "
2914 + "path fully disabled. Continuing with init.\n");
2915 +
2916 + return IXGBE_SUCCESS;
2917 +}
2918 +
2919 +/**
2920 + * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
2921 + * @hw: pointer to hardware structure
2922 + *
2923 + * Enables the receive data path.
2924 + **/
2925 +s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
2926 +{
2927 + int secrxreg;
2928 +
2929 + DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
2930 +
2931 + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2932 + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2933 + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2934 + IXGBE_WRITE_FLUSH(hw);
2935 +
2936 + return IXGBE_SUCCESS;
2937 +}
2938 +
2939 +/**
2643 2940 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2644 2941 * @hw: pointer to hardware structure
2645 2942 * @regval: register value to write to RXCTRL
2646 2943 *
2647 2944 * Enables the Rx DMA unit
2648 2945 **/
2649 2946 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2650 2947 {
2651 2948 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
2652 2949
2653 2950 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2654 2951
2655 2952 return IXGBE_SUCCESS;
2656 2953 }
2657 2954
2658 2955 /**
2659 2956 * ixgbe_blink_led_start_generic - Blink LED based on index.
2660 2957 * @hw: pointer to hardware structure
2661 2958 * @index: led number to blink
2662 2959 **/
2663 2960 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2664 2961 {
2665 2962 ixgbe_link_speed speed = 0;
2666 2963 bool link_up = 0;
2667 2964 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2668 2965 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2669 2966
2670 2967 DEBUGFUNC("ixgbe_blink_led_start_generic");
2671 2968
|
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
2672 2969 /*
2673 2970 * Link must be up to auto-blink the LEDs;
2674 2971 * Force it if link is down.
2675 2972 */
2676 2973 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2677 2974
2678 2975 if (!link_up) {
2679 2976 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2680 2977 autoc_reg |= IXGBE_AUTOC_FLU;
2681 2978 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2979 + IXGBE_WRITE_FLUSH(hw);
2682 2980 msec_delay(10);
2683 2981 }
2684 2982
2685 2983 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2686 2984 led_reg |= IXGBE_LED_BLINK(index);
2687 2985 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2688 2986 IXGBE_WRITE_FLUSH(hw);
2689 2987
2690 2988 return IXGBE_SUCCESS;
2691 2989 }
2692 2990
2693 2991 /**
2694 2992 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2695 2993 * @hw: pointer to hardware structure
2696 2994 * @index: led number to stop blinking
2697 2995 **/
2698 2996 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2699 2997 {
2700 2998 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2701 2999 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2702 3000
2703 3001 DEBUGFUNC("ixgbe_blink_led_stop_generic");
2704 3002
2705 3003
2706 3004 autoc_reg &= ~IXGBE_AUTOC_FLU;
2707 3005 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2708 3006 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2709 3007
2710 3008 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2711 3009 led_reg &= ~IXGBE_LED_BLINK(index);
2712 3010 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2713 3011 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2714 3012 IXGBE_WRITE_FLUSH(hw);
2715 3013
2716 3014 return IXGBE_SUCCESS;
2717 3015 }
2718 3016
|
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
2719 3017 /**
2720 3018 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2721 3019 * @hw: pointer to hardware structure
2722 3020 * @san_mac_offset: SAN MAC address offset
2723 3021 *
2724 3022 * This function will read the EEPROM location for the SAN MAC address
2725 3023 * pointer, and returns the value at that location. This is used in both
2726 3024 * get and set mac_addr routines.
2727 3025 **/
2728 3026 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2729 - u16 *san_mac_offset)
3027 + u16 *san_mac_offset)
2730 3028 {
2731 3029 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
2732 3030
2733 3031 /*
2734 3032 * First read the EEPROM pointer to see if the MAC addresses are
2735 3033 * available.
2736 3034 */
2737 3035 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2738 3036
2739 3037 return IXGBE_SUCCESS;
2740 3038 }
2741 3039
2742 3040 /**
2743 3041 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2744 3042 * @hw: pointer to hardware structure
2745 3043 * @san_mac_addr: SAN MAC address
2746 3044 *
2747 3045 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2748 3046 * per-port, so set_lan_id() must be called before reading the addresses.
2749 3047 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2750 3048 * upon for non-SFP connections, so we must call it here.
2751 3049 **/
2752 3050 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
|
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
2753 3051 {
2754 3052 u16 san_mac_data, san_mac_offset;
2755 3053 u8 i;
2756 3054
2757 3055 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
2758 3056
2759 3057 /*
2760 3058 * First read the EEPROM pointer to see if the MAC addresses are
2761 3059 * available. If they're not, no point in calling set_lan_id() here.
2762 3060 */
2763 - (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3061 + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2764 3062
2765 3063 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2766 3064 /*
2767 3065 * No addresses available in this EEPROM. It's not an
2768 3066 * error though, so just wipe the local address and return.
2769 3067 */
2770 3068 for (i = 0; i < 6; i++)
2771 3069 san_mac_addr[i] = 0xFF;
2772 3070
2773 3071 goto san_mac_addr_out;
2774 3072 }
2775 3073
2776 3074 /* make sure we know which port we need to program */
2777 3075 hw->mac.ops.set_lan_id(hw);
2778 3076 /* apply the port offset to the address offset */
2779 3077 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2780 - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3078 + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2781 3079 for (i = 0; i < 3; i++) {
2782 3080 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2783 3081 san_mac_addr[i * 2] = (u8)(san_mac_data);
2784 3082 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2785 3083 san_mac_offset++;
2786 3084 }
2787 3085
2788 3086 san_mac_addr_out:
2789 3087 return IXGBE_SUCCESS;
2790 3088 }
2791 3089
2792 3090 /**
2793 3091 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
2794 3092 * @hw: pointer to hardware structure
2795 3093 * @san_mac_addr: SAN MAC address
2796 3094 *
2797 3095 * Write a SAN MAC address to the EEPROM.
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
2798 3096 **/
2799 3097 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2800 3098 {
2801 3099 s32 status = IXGBE_SUCCESS;
2802 3100 u16 san_mac_data, san_mac_offset;
2803 3101 u8 i;
2804 3102
2805 3103 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
2806 3104
2807 3105 /* Look for SAN mac address pointer. If not defined, return */
2808 - (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3106 + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2809 3107
2810 3108 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2811 3109 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
2812 3110 goto san_mac_addr_out;
2813 3111 }
2814 3112
2815 3113 /* Make sure we know which port we need to write */
2816 3114 hw->mac.ops.set_lan_id(hw);
2817 3115 /* Apply the port offset to the address offset */
2818 3116 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2819 - (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3117 + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2820 3118
2821 3119 for (i = 0; i < 3; i++) {
2822 3120 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
2823 3121 san_mac_data |= (u16)(san_mac_addr[i * 2]);
2824 3122 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
2825 3123 san_mac_offset++;
2826 3124 }
2827 3125
2828 3126 san_mac_addr_out:
2829 3127 return status;
2830 3128 }
2831 3129
2832 3130 /**
2833 3131 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2834 3132 * @hw: pointer to hardware structure
2835 3133 *
2836 3134 * Read PCIe configuration space, and get the MSI-X vector count from
2837 3135 * the capabilities table.
2838 3136 **/
2839 -u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3137 +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2840 3138 {
2841 - u32 msix_count = 64;
3139 + u16 msix_count = 1;
3140 + u16 max_msix_count;
3141 + u16 pcie_offset;
2842 3142
3143 + switch (hw->mac.type) {
3144 + case ixgbe_mac_82598EB:
3145 + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3146 + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3147 + break;
3148 + case ixgbe_mac_82599EB:
3149 + case ixgbe_mac_X540:
3150 + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3151 + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3152 + break;
3153 + default:
3154 + return msix_count;
3155 + }
3156 +
2843 3157 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
2844 - if (hw->mac.msix_vectors_from_pcie) {
2845 - msix_count = IXGBE_READ_PCIE_WORD(hw,
2846 - IXGBE_PCIE_MSIX_82599_CAPS);
2847 - msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3158 + msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3159 + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2848 3160
2849 - /* MSI-X count is zero-based in HW, so increment to give
2850 - * proper value */
2851 - msix_count++;
2852 - }
3161 + /* MSI-X count is zero-based in HW */
3162 + msix_count++;
2853 3163
3164 + if (msix_count > max_msix_count)
3165 + msix_count = max_msix_count;
3166 +
2854 3167 return msix_count;
2855 3168 }
2856 3169
2857 3170 /**
2858 3171 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
2859 3172 * @hw: pointer to hardware structure
2860 3173 * @addr: Address to put into receive address register
2861 3174 * @vmdq: VMDq pool to assign
2862 3175 *
2863 3176 * Puts an ethernet address into a receive address register, or
2864 3177 * finds the rar that it is already in; adds to the pool list
2865 3178 **/
2866 3179 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2867 3180 {
2868 3181 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
2869 3182 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
2870 3183 u32 rar;
2871 3184 u32 rar_low, rar_high;
2872 3185 u32 addr_low, addr_high;
2873 3186
2874 3187 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
2875 3188
2876 3189 /* swap bytes for HW little endian */
2877 3190 addr_low = addr[0] | (addr[1] << 8)
2878 3191 | (addr[2] << 16)
2879 3192 | (addr[3] << 24);
2880 3193 addr_high = addr[4] | (addr[5] << 8);
2881 3194
2882 3195 /*
2883 3196 * Either find the mac_id in rar or find the first empty space.
2884 3197 * rar_highwater points to just after the highest currently used
2885 3198 * rar in order to shorten the search. It grows when we add a new
2886 3199 * rar to the top.
2887 3200 */
2888 3201 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
2889 3202 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
2890 3203
2891 3204 if (((IXGBE_RAH_AV & rar_high) == 0)
2892 3205 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
|
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
2893 3206 first_empty_rar = rar;
2894 3207 } else if ((rar_high & 0xFFFF) == addr_high) {
2895 3208 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
2896 3209 if (rar_low == addr_low)
2897 3210 break; /* found it already in the rars */
2898 3211 }
2899 3212 }
2900 3213
2901 3214 if (rar < hw->mac.rar_highwater) {
2902 3215 /* already there so just add to the pool bits */
2903 - (void) ixgbe_set_vmdq(hw, rar, vmdq);
3216 + ixgbe_set_vmdq(hw, rar, vmdq);
2904 3217 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
2905 3218 /* stick it into first empty RAR slot we found */
2906 3219 rar = first_empty_rar;
2907 - (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3220 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2908 3221 } else if (rar == hw->mac.rar_highwater) {
2909 3222 /* add it to the top of the list and inc the highwater mark */
2910 - (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3223 + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2911 3224 hw->mac.rar_highwater++;
2912 3225 } else if (rar >= hw->mac.num_rar_entries) {
2913 3226 return IXGBE_ERR_INVALID_MAC_ADDR;
2914 3227 }
2915 3228
2916 3229 /*
2917 3230 * If we found rar[0], make sure the default pool bit (we use pool 0)
2918 3231 * remains cleared to be sure default pool packets will get delivered
2919 3232 */
2920 3233 if (rar == 0)
2921 - (void) ixgbe_clear_vmdq(hw, rar, 0);
3234 + ixgbe_clear_vmdq(hw, rar, 0);
2922 3235
2923 3236 return rar;
2924 3237 }
2925 3238
2926 3239 /**
2927 3240 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2928 3241 * @hw: pointer to hardware struct
2929 3242 * @rar: receive address register index to disassociate
2930 3243 * @vmdq: VMDq pool index to remove from the rar
2931 3244 **/
2932 3245 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2933 3246 {
2934 3247 u32 mpsar_lo, mpsar_hi;
2935 3248 u32 rar_entries = hw->mac.num_rar_entries;
2936 3249
2937 3250 DEBUGFUNC("ixgbe_clear_vmdq_generic");
2938 3251
2939 3252 /* Make sure we are using a valid rar index range */
2940 3253 if (rar >= rar_entries) {
2941 3254 DEBUGOUT1("RAR index %d is out of range.\n", rar);
2942 3255 return IXGBE_ERR_INVALID_ARGUMENT;
2943 3256 }
2944 3257
2945 3258 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2946 3259 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2947 3260
2948 3261 if (!mpsar_lo && !mpsar_hi)
2949 3262 goto done;
2950 3263
2951 3264 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2952 3265 if (mpsar_lo) {
2953 3266 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2954 3267 mpsar_lo = 0;
2955 3268 }
2956 3269 if (mpsar_hi) {
2957 3270 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2958 3271 mpsar_hi = 0;
2959 3272 }
2960 3273 } else if (vmdq < 32) {
2961 3274 mpsar_lo &= ~(1 << vmdq);
2962 3275 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2963 3276 } else {
2964 3277 mpsar_hi &= ~(1 << (vmdq - 32));
2965 3278 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2966 3279 }
2967 3280
2968 3281 /* was that the last pool using this rar? */
2969 3282 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2970 3283 hw->mac.ops.clear_rar(hw, rar);
2971 3284 done:
2972 3285 return IXGBE_SUCCESS;
2973 3286 }
2974 3287
2975 3288 /**
2976 3289 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2977 3290 * @hw: pointer to hardware struct
2978 3291 * @rar: receive address register index to associate with a VMDq index
2979 3292 * @vmdq: VMDq pool index
2980 3293 **/
2981 3294 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2982 3295 {
2983 3296 u32 mpsar;
2984 3297 u32 rar_entries = hw->mac.num_rar_entries;
2985 3298
2986 3299 DEBUGFUNC("ixgbe_set_vmdq_generic");
2987 3300
2988 3301 /* Make sure we are using a valid rar index range */
2989 3302 if (rar >= rar_entries) {
2990 3303 DEBUGOUT1("RAR index %d is out of range.\n", rar);
2991 3304 return IXGBE_ERR_INVALID_ARGUMENT;
2992 3305 }
2993 3306
2994 3307 if (vmdq < 32) {
2995 3308 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2996 3309 mpsar |= 1 << vmdq;
|
↓ open down ↓ |
65 lines elided |
↑ open up ↑ |
2997 3310 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2998 3311 } else {
2999 3312 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3000 3313 mpsar |= 1 << (vmdq - 32);
3001 3314 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3002 3315 }
3003 3316 return IXGBE_SUCCESS;
3004 3317 }
3005 3318
3006 3319 /**
3320 + * This function should only be invoked in the IOV mode.
3321 + * In IOV mode, Default pool is next pool after the number of
3322 + * VFs advertised and not 0.
3323 + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3324 + *
3325 + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3326 + * @hw: pointer to hardware struct
3327 + * @vmdq: VMDq pool index
3328 + **/
3329 +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3330 +{
3331 + u32 rar = hw->mac.san_mac_rar_index;
3332 +
3333 + DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3334 +
3335 + if (vmdq < 32) {
3336 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3337 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3338 + } else {
3339 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3340 + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3341 + }
3342 +
3343 + return IXGBE_SUCCESS;
3344 +}
3345 +
3346 +/**
3007 3347 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3008 3348 * @hw: pointer to hardware structure
3009 3349 **/
3010 3350 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3011 3351 {
3012 3352 int i;
3013 3353
3014 3354 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3015 3355 DEBUGOUT(" Clearing UTA\n");
3016 3356
3017 3357 for (i = 0; i < 128; i++)
3018 3358 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3019 3359
3020 3360 return IXGBE_SUCCESS;
3021 3361 }
3022 3362
3023 3363 /**
3024 3364 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3025 3365 * @hw: pointer to hardware structure
3026 3366 * @vlan: VLAN id to write to VLAN filter
3027 3367 *
3028 3368 * return the VLVF index where this VLAN id should be placed
3029 3369 *
3030 3370 **/
3031 3371 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3032 3372 {
3033 3373 u32 bits = 0;
3034 3374 u32 first_empty_slot = 0;
3035 3375 s32 regindex;
3036 3376
3037 3377 /* short cut the special case */
3038 3378 if (vlan == 0)
3039 3379 return 0;
3040 3380
3041 3381 /*
3042 3382 * Search for the vlan id in the VLVF entries. Save off the first empty
3043 3383 * slot found along the way
3044 3384 */
3045 3385 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3046 3386 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3047 3387 if (!bits && !(first_empty_slot))
3048 3388 first_empty_slot = regindex;
3049 3389 else if ((bits & 0x0FFF) == vlan)
3050 3390 break;
3051 3391 }
3052 3392
3053 3393 /*
3054 3394 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3055 3395 * in the VLVF. Else use the first empty VLVF register for this
3056 3396 * vlan id.
3057 3397 */
3058 3398 if (regindex >= IXGBE_VLVF_ENTRIES) {
3059 3399 if (first_empty_slot)
3060 3400 regindex = first_empty_slot;
3061 3401 else {
3062 3402 DEBUGOUT("No space in VLVF.\n");
3063 3403 regindex = IXGBE_ERR_NO_SPACE;
3064 3404 }
3065 3405 }
3066 3406
3067 3407 return regindex;
3068 3408 }
3069 3409
|
↓ open down ↓ |
53 lines elided |
↑ open up ↑ |
3070 3410 /**
3071 3411 * ixgbe_set_vfta_generic - Set VLAN filter table
3072 3412 * @hw: pointer to hardware structure
3073 3413 * @vlan: VLAN id to write to VLAN filter
3074 3414 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3075 3415 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3076 3416 *
3077 3417 * Turn on/off specified VLAN in the VLAN filter table.
3078 3418 **/
3079 3419 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3080 - bool vlan_on)
3420 + bool vlan_on)
3081 3421 {
3082 3422 s32 regindex;
3083 3423 u32 bitindex;
3084 3424 u32 vfta;
3085 - u32 bits;
3086 - u32 vt;
3087 3425 u32 targetbit;
3426 + s32 ret_val = IXGBE_SUCCESS;
3088 3427 bool vfta_changed = FALSE;
3089 3428
3090 3429 DEBUGFUNC("ixgbe_set_vfta_generic");
3091 3430
3092 3431 if (vlan > 4095)
3093 3432 return IXGBE_ERR_PARAM;
3094 3433
3095 3434 /*
3096 3435 * this is a 2 part operation - first the VFTA, then the
3097 3436 * VLVF and VLVFB if VT Mode is set
3098 3437 * We don't write the VFTA until we know the VLVF part succeeded.
3099 3438 */
3100 3439
3101 3440 /* Part 1
3102 3441 * The VFTA is a bitstring made up of 128 32-bit registers
3103 3442 * that enable the particular VLAN id, much like the MTA:
3104 3443 * bits[11-5]: which register
3105 3444 * bits[4-0]: which bit in the register
3106 3445 */
3107 3446 regindex = (vlan >> 5) & 0x7F;
3108 3447 bitindex = vlan & 0x1F;
3109 3448 targetbit = (1 << bitindex);
3110 3449 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3111 3450
3112 3451 if (vlan_on) {
3113 3452 if (!(vfta & targetbit)) {
3114 3453 vfta |= targetbit;
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
3115 3454 vfta_changed = TRUE;
3116 3455 }
3117 3456 } else {
3118 3457 if ((vfta & targetbit)) {
3119 3458 vfta &= ~targetbit;
3120 3459 vfta_changed = TRUE;
3121 3460 }
3122 3461 }
3123 3462
3124 3463 /* Part 2
3125 - * If VT Mode is set
3464 + * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3465 + */
3466 + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3467 + &vfta_changed);
3468 + if (ret_val != IXGBE_SUCCESS)
3469 + return ret_val;
3470 +
3471 + if (vfta_changed)
3472 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3473 +
3474 + return IXGBE_SUCCESS;
3475 +}
3476 +
3477 +/**
3478 + * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3479 + * @hw: pointer to hardware structure
3480 + * @vlan: VLAN id to write to VLAN filter
3481 + * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3482 + * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3483 + * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3484 + * should be changed
3485 + *
3486 + * Turn on/off specified bit in VLVF table.
3487 + **/
3488 +s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3489 + bool vlan_on, bool *vfta_changed)
3490 +{
3491 + u32 vt;
3492 +
3493 + DEBUGFUNC("ixgbe_set_vlvf_generic");
3494 +
3495 + if (vlan > 4095)
3496 + return IXGBE_ERR_PARAM;
3497 +
3498 + /* If VT Mode is set
3126 3499 * Either vlan_on
3127 3500 * make sure the vlan is in VLVF
3128 3501 * set the vind bit in the matching VLVFB
3129 3502 * Or !vlan_on
3130 3503 * clear the pool bit and possibly the vind
3131 3504 */
3132 3505 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3133 3506 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3134 3507 s32 vlvf_index;
3508 + u32 bits;
3135 3509
3136 3510 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3137 3511 if (vlvf_index < 0)
3138 3512 return vlvf_index;
3139 3513
3140 3514 if (vlan_on) {
3141 3515 /* set the pool bit */
3142 3516 if (vind < 32) {
3143 3517 bits = IXGBE_READ_REG(hw,
3144 - IXGBE_VLVFB(vlvf_index*2));
3518 + IXGBE_VLVFB(vlvf_index * 2));
3145 3519 bits |= (1 << vind);
3146 3520 IXGBE_WRITE_REG(hw,
3147 - IXGBE_VLVFB(vlvf_index*2),
3521 + IXGBE_VLVFB(vlvf_index * 2),
3148 3522 bits);
3149 3523 } else {
3150 3524 bits = IXGBE_READ_REG(hw,
3151 - IXGBE_VLVFB((vlvf_index*2)+1));
3152 - bits |= (1 << (vind-32));
3525 + IXGBE_VLVFB((vlvf_index * 2) + 1));
3526 + bits |= (1 << (vind - 32));
3153 3527 IXGBE_WRITE_REG(hw,
3154 - IXGBE_VLVFB((vlvf_index*2)+1),
3155 - bits);
3528 + IXGBE_VLVFB((vlvf_index * 2) + 1),
3529 + bits);
3156 3530 }
3157 3531 } else {
3158 3532 /* clear the pool bit */
3159 3533 if (vind < 32) {
3160 3534 bits = IXGBE_READ_REG(hw,
3161 - IXGBE_VLVFB(vlvf_index*2));
3535 + IXGBE_VLVFB(vlvf_index * 2));
3162 3536 bits &= ~(1 << vind);
3163 3537 IXGBE_WRITE_REG(hw,
3164 - IXGBE_VLVFB(vlvf_index*2),
3538 + IXGBE_VLVFB(vlvf_index * 2),
3165 3539 bits);
3166 3540 bits |= IXGBE_READ_REG(hw,
3167 - IXGBE_VLVFB((vlvf_index*2)+1));
3541 + IXGBE_VLVFB((vlvf_index * 2) + 1));
3168 3542 } else {
3169 3543 bits = IXGBE_READ_REG(hw,
3170 - IXGBE_VLVFB((vlvf_index*2)+1));
3171 - bits &= ~(1 << (vind-32));
3544 + IXGBE_VLVFB((vlvf_index * 2) + 1));
3545 + bits &= ~(1 << (vind - 32));
3172 3546 IXGBE_WRITE_REG(hw,
3173 - IXGBE_VLVFB((vlvf_index*2)+1),
3174 - bits);
3547 + IXGBE_VLVFB((vlvf_index * 2) + 1),
3548 + bits);
3175 3549 bits |= IXGBE_READ_REG(hw,
3176 - IXGBE_VLVFB(vlvf_index*2));
3550 + IXGBE_VLVFB(vlvf_index * 2));
3177 3551 }
3178 3552 }
3179 3553
3180 3554 /*
3181 3555 * If there are still bits set in the VLVFB registers
3182 3556 * for the VLAN ID indicated we need to see if the
3183 3557 * caller is requesting that we clear the VFTA entry bit.
3184 3558 * If the caller has requested that we clear the VFTA
3185 3559 * entry bit but there are still pools/VFs using this VLAN
3186 3560 * ID entry then ignore the request. We're not worried
3187 3561 * about the case where we're turning the VFTA VLAN ID
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
3188 3562 * entry bit on, only when requested to turn it off as
3189 3563 * there may be multiple pools and/or VFs using the
3190 3564 * VLAN ID entry. In that case we cannot clear the
3191 3565 * VFTA bit until all pools/VFs using that VLAN ID have also
3192 3566 * been cleared. This will be indicated by "bits" being
3193 3567 * zero.
3194 3568 */
3195 3569 if (bits) {
3196 3570 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3197 3571 (IXGBE_VLVF_VIEN | vlan));
3198 - if (!vlan_on) {
3572 + if ((!vlan_on) && (vfta_changed != NULL)) {
3199 3573 /* someone wants to clear the vfta entry
3200 3574 * but some pools/VFs are still using it.
3201 3575 * Ignore it. */
3202 - vfta_changed = FALSE;
3576 + *vfta_changed = FALSE;
3203 3577 }
3204 - }
3205 - else
3578 + } else
3206 3579 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3207 3580 }
3208 3581
3209 - if (vfta_changed)
3210 - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3211 -
3212 3582 return IXGBE_SUCCESS;
3213 3583 }
3214 3584
3215 3585 /**
3216 3586 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3217 3587 * @hw: pointer to hardware structure
3218 3588 *
3219 3589 * Clears the VLAN filter table, and the VMDq index associated with the filter
3220 3590 **/
3221 3591 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3222 3592 {
3223 3593 u32 offset;
3224 3594
3225 3595 DEBUGFUNC("ixgbe_clear_vfta_generic");
3226 3596
3227 3597 for (offset = 0; offset < hw->mac.vft_size; offset++)
3228 3598 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3229 3599
3230 3600 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3231 3601 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3232 - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
3233 - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
3602 + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3603 + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3234 3604 }
3235 3605
3236 3606 return IXGBE_SUCCESS;
3237 3607 }
3238 3608
3239 3609 /**
3240 3610 * ixgbe_check_mac_link_generic - Determine link and speed status
3241 3611 * @hw: pointer to hardware structure
3242 3612 * @speed: pointer to link speed
3243 3613 * @link_up: TRUE when link is up
3244 3614 * @link_up_wait_to_complete: bool used to wait for link up or not
3245 3615 *
3246 3616 * Reads the links register to determine if link is up and the current speed
3247 3617 **/
3248 3618 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3249 - bool *link_up, bool link_up_wait_to_complete)
3619 + bool *link_up, bool link_up_wait_to_complete)
3250 3620 {
3251 3621 u32 links_reg, links_orig;
3252 3622 u32 i;
3253 3623
3254 3624 DEBUGFUNC("ixgbe_check_mac_link_generic");
3255 3625
3256 3626 /* clear the old state */
3257 3627 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3258 3628
3259 3629 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3260 3630
3261 3631 if (links_orig != links_reg) {
3262 3632 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3263 - links_orig, links_reg);
3633 + links_orig, links_reg);
3264 3634 }
3265 3635
3266 3636 if (link_up_wait_to_complete) {
3267 3637 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3268 3638 if (links_reg & IXGBE_LINKS_UP) {
3269 3639 *link_up = TRUE;
3270 3640 break;
3271 3641 } else {
3272 3642 *link_up = FALSE;
3273 3643 }
3274 3644 msec_delay(100);
3275 3645 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3276 3646 }
3277 3647 } else {
|
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
3278 3648 if (links_reg & IXGBE_LINKS_UP)
3279 3649 *link_up = TRUE;
3280 3650 else
3281 3651 *link_up = FALSE;
3282 3652 }
3283 3653
3284 3654 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3285 3655 IXGBE_LINKS_SPEED_10G_82599)
3286 3656 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3287 3657 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3288 - IXGBE_LINKS_SPEED_1G_82599)
3658 + IXGBE_LINKS_SPEED_1G_82599)
3289 3659 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3290 3660 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3291 - IXGBE_LINKS_SPEED_100_82599)
3661 + IXGBE_LINKS_SPEED_100_82599)
3292 3662 *speed = IXGBE_LINK_SPEED_100_FULL;
3293 3663 else
3294 3664 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3295 3665
3296 - /* if link is down, zero out the current_mode */
3297 - if (*link_up == FALSE) {
3298 - hw->fc.current_mode = ixgbe_fc_none;
3299 - hw->fc.fc_was_autonegged = FALSE;
3300 - }
3301 -
3302 3666 return IXGBE_SUCCESS;
3303 3667 }
3304 3668
3305 3669 /**
3306 3670 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3307 3671 * the EEPROM
3308 3672 * @hw: pointer to hardware structure
3309 3673 * @wwnn_prefix: the alternative WWNN prefix
3310 3674 * @wwpn_prefix: the alternative WWPN prefix
3311 3675 *
3312 3676 * This function will read the EEPROM from the alternative SAN MAC address
3313 3677 * block to check the support for the alternative WWNN/WWPN prefix support.
3314 3678 **/
3315 3679 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3316 - u16 *wwpn_prefix)
3680 + u16 *wwpn_prefix)
3317 3681 {
3318 3682 u16 offset, caps;
3319 3683 u16 alt_san_mac_blk_offset;
3320 3684
3321 3685 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3322 3686
3323 3687 /* clear output first */
3324 3688 *wwnn_prefix = 0xFFFF;
3325 3689 *wwpn_prefix = 0xFFFF;
3326 3690
3327 3691 /* check if alternative SAN MAC is supported */
3328 3692 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3329 - &alt_san_mac_blk_offset);
3693 + &alt_san_mac_blk_offset);
3330 3694
3331 3695 if ((alt_san_mac_blk_offset == 0) ||
3332 3696 (alt_san_mac_blk_offset == 0xFFFF))
3333 3697 goto wwn_prefix_out;
3334 3698
3335 3699 /* check capability in alternative san mac address block */
3336 3700 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3337 3701 hw->eeprom.ops.read(hw, offset, &caps);
3338 3702 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3339 3703 goto wwn_prefix_out;
3340 3704
3341 3705 /* get the corresponding prefix for WWNN/WWPN */
3342 3706 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3343 3707 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3344 3708
3345 3709 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3346 3710 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3347 3711
3348 3712 wwn_prefix_out:
3349 3713 return IXGBE_SUCCESS;
3350 3714 }
3351 3715
3352 3716 /**
3353 3717 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3354 3718 * @hw: pointer to hardware structure
3355 3719 * @bs: the fcoe boot status
3356 3720 *
3357 3721 * This function will read the FCOE boot status from the iSCSI FCOE block
3358 3722 **/
3359 3723 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3360 3724 {
3361 3725 u16 offset, caps, flags;
3362 3726 s32 status;
3363 3727
3364 3728 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3365 3729
3366 3730 /* clear output first */
3367 3731 *bs = ixgbe_fcoe_bootstatus_unavailable;
3368 3732
3369 3733 /* check if FCOE IBA block is present */
3370 3734 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3371 3735 status = hw->eeprom.ops.read(hw, offset, &caps);
3372 3736 if (status != IXGBE_SUCCESS)
3373 3737 goto out;
3374 3738
3375 3739 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3376 3740 goto out;
3377 3741
3378 3742 /* check if iSCSI FCOE block is populated */
3379 3743 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
3380 3744 if (status != IXGBE_SUCCESS)
3381 3745 goto out;
3382 3746
3383 3747 if ((offset == 0) || (offset == 0xFFFF))
3384 3748 goto out;
3385 3749
3386 3750 /* read fcoe flags in iSCSI FCOE block */
3387 3751 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
3388 3752 status = hw->eeprom.ops.read(hw, offset, &flags);
3389 3753 if (status != IXGBE_SUCCESS)
3390 3754 goto out;
3391 3755
|
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
3392 3756 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
3393 3757 *bs = ixgbe_fcoe_bootstatus_enabled;
3394 3758 else
3395 3759 *bs = ixgbe_fcoe_bootstatus_disabled;
3396 3760
3397 3761 out:
3398 3762 return status;
3399 3763 }
3400 3764
3401 3765 /**
3402 - * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3403 - * control
3404 - * @hw: pointer to hardware structure
3405 - *
3406 - * There are several phys that do not support autoneg flow control. This
3407 - * function check the device id to see if the associated phy supports
3408 - * autoneg flow control.
3409 - **/
3410 -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3411 -{
3412 -
3413 - DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
3414 -
3415 - switch (hw->device_id) {
3416 - case IXGBE_DEV_ID_82599_T3_LOM:
3417 - return IXGBE_SUCCESS;
3418 - default:
3419 - return IXGBE_ERR_FC_NOT_SUPPORTED;
3420 - }
3421 -}
3422 -
3423 -/**
3424 3766 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3425 3767 * @hw: pointer to hardware structure
3426 3768 * @enable: enable or disable switch for anti-spoofing
3427 3769 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
3428 3770 *
3429 3771 **/
3430 3772 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
3431 3773 {
3432 3774 int j;
3433 3775 int pf_target_reg = pf >> 3;
3434 3776 int pf_target_shift = pf % 8;
3435 3777 u32 pfvfspoof = 0;
3436 3778
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
3437 3779 if (hw->mac.type == ixgbe_mac_82598EB)
3438 3780 return;
3439 3781
3440 3782 if (enable)
3441 3783 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
3442 3784
3443 3785 /*
3444 3786 * PFVFSPOOF register array is size 8 with 8 bits assigned to
3445 3787 * MAC anti-spoof enables in each register array element.
3446 3788 */
3447 - for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3789 + for (j = 0; j < pf_target_reg; j++)
3448 3790 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3449 3791
3450 - /* If not enabling anti-spoofing then done */
3451 - if (!enable)
3452 - return;
3453 -
3454 3792 /*
3455 3793 * The PF should be allowed to spoof so that it can support
3456 - * emulation mode NICs. Reset the bit assigned to the PF
3794 + * emulation mode NICs. Do not set the bits assigned to the PF
3457 3795 */
3458 - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
3459 - pfvfspoof ^= (1 << pf_target_shift);
3460 - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
3796 + pfvfspoof &= (1 << pf_target_shift) - 1;
3797 + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
3798 +
3799 + /*
3800 + * Remaining pools belong to the PF so they do not need to have
3801 + * anti-spoofing enabled.
3802 + */
3803 + for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
3804 + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
3461 3805 }
3462 3806
3463 3807 /**
3464 3808 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3465 3809 * @hw: pointer to hardware structure
3466 3810 * @enable: enable or disable switch for VLAN anti-spoofing
 3467 3811  * @vf: Virtual Function pool - VF pool to set for VLAN anti-spoofing
3468 3812 *
3469 3813 **/
3470 3814 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3471 3815 {
3472 3816 int vf_target_reg = vf >> 3;
3473 3817 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3474 3818 u32 pfvfspoof;
3475 3819
3476 3820 if (hw->mac.type == ixgbe_mac_82598EB)
3477 3821 return;
3478 3822
3479 3823 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3480 3824 if (enable)
3481 3825 pfvfspoof |= (1 << vf_target_shift);
3482 3826 else
3483 3827 pfvfspoof &= ~(1 << vf_target_shift);
3484 3828 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3485 3829 }
3486 3830
3487 3831 /**
3488 3832 * ixgbe_get_device_caps_generic - Get additional device capabilities
3489 3833 * @hw: pointer to hardware structure
3490 3834 * @device_caps: the EEPROM word with the extra device capabilities
3491 3835 *
3492 3836 * This function will read the EEPROM location for the device capabilities,
3493 3837 * and return the word through device_caps.
3494 3838 **/
3495 3839 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3496 3840 {
3497 3841 DEBUGFUNC("ixgbe_get_device_caps_generic");
3498 3842
3499 3843 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3500 3844
3501 3845 return IXGBE_SUCCESS;
3502 3846 }
3503 3847
3504 3848 /**
3505 3849 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
3506 3850 * @hw: pointer to hardware structure
3507 3851 *
3508 3852 **/
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
3509 3853 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
3510 3854 {
3511 3855 u32 regval;
3512 3856 u32 i;
3513 3857
3514 3858 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
3515 3859
3516 3860 /* Enable relaxed ordering */
3517 3861 for (i = 0; i < hw->mac.max_tx_queues; i++) {
3518 3862 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3519 - regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
3863 + regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3520 3864 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
3521 3865 }
3522 3866
3523 3867 for (i = 0; i < hw->mac.max_rx_queues; i++) {
3524 3868 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
3525 - regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
3526 - IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
3869 + regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
3870 + IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
3527 3871 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
3528 3872 }
3529 3873
3530 3874 }
3875 +
3876 +/**
3877 + * ixgbe_calculate_checksum - Calculate checksum for buffer
 3878 + * @buffer: pointer to the buffer to checksum
 3879 + * @length: number of bytes of the buffer to checksum
 3880 + * Calculates the checksum of the buffer over the specified length. The
 3881 + * calculated checksum is returned.
3882 + **/
3883 +static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3884 +{
3885 + u32 i;
3886 + u8 sum = 0;
3887 +
3888 + DEBUGFUNC("ixgbe_calculate_checksum");
3889 +
3890 + if (!buffer)
3891 + return 0;
3892 +
3893 + for (i = 0; i < length; i++)
3894 + sum += buffer[i];
3895 +
3896 + return (u8) (0 - sum);
3897 +}
3898 +
3899 +/**
3900 + * ixgbe_host_interface_command - Issue command to manageability block
3901 + * @hw: pointer to the HW structure
3902 + * @buffer: contains the command to write and where the return status will
3903 + * be placed
3904 + * @length: length of buffer, must be multiple of 4 bytes
3905 + *
3906 + * Communicates with the manageability block. On success return IXGBE_SUCCESS
3907 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3908 + **/
3909 +static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3910 + u32 length)
3911 +{
3912 + u32 hicr, i, bi;
3913 + u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3914 + u8 buf_len, dword_len;
3915 +
3916 + s32 ret_val = IXGBE_SUCCESS;
3917 +
3918 + DEBUGFUNC("ixgbe_host_interface_command");
3919 +
3920 + if (length == 0 || length & 0x3 ||
3921 + length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3922 + DEBUGOUT("Buffer length failure.\n");
3923 + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3924 + goto out;
3925 + }
3926 +
3927 + /* Check that the host interface is enabled. */
3928 + hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3929 + if ((hicr & IXGBE_HICR_EN) == 0) {
3930 + DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
3931 + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3932 + goto out;
3933 + }
3934 +
3935 + /* Calculate length in DWORDs */
3936 + dword_len = length >> 2;
3937 +
3938 + /*
3939 + * The device driver writes the relevant command block
3940 + * into the ram area.
3941 + */
3942 + for (i = 0; i < dword_len; i++)
3943 + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3944 + i, IXGBE_CPU_TO_LE32(buffer[i]));
3945 +
3946 + /* Setting this bit tells the ARC that a new command is pending. */
3947 + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3948 +
3949 + for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
3950 + hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3951 + if (!(hicr & IXGBE_HICR_C))
3952 + break;
3953 + msec_delay(1);
3954 + }
3955 +
3956 + /* Check command successful completion. */
3957 + if (i == IXGBE_HI_COMMAND_TIMEOUT ||
3958 + (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
3959 + DEBUGOUT("Command has failed with no status valid.\n");
3960 + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3961 + goto out;
3962 + }
3963 +
3964 + /* Calculate length in DWORDs */
3965 + dword_len = hdr_size >> 2;
3966 +
3967 + /* first pull in the header so we know the buffer length */
3968 + for (bi = 0; bi < dword_len; bi++) {
3969 + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3970 + IXGBE_LE32_TO_CPUS(&buffer[bi]);
3971 + }
3972 +
 3973 +	/* If there is anything in the data position, pull it in */
3974 + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
3975 + if (buf_len == 0)
3976 + goto out;
3977 +
3978 + if (length < (buf_len + hdr_size)) {
3979 + DEBUGOUT("Buffer not large enough for reply message.\n");
3980 + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3981 + goto out;
3982 + }
3983 +
3984 + /* Calculate length in DWORDs, add 3 for odd lengths */
3985 + dword_len = (buf_len + 3) >> 2;
3986 +
 3987 +	/* Pull in the rest of the buffer (bi is where we left off) */
3988 + for (; bi <= dword_len; bi++) {
3989 + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3990 + IXGBE_LE32_TO_CPUS(&buffer[bi]);
3991 + }
3992 +
3993 +out:
3994 + return ret_val;
3995 +}
3996 +
3997 +/**
3998 + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3999 + * @hw: pointer to the HW structure
4000 + * @maj: driver version major number
4001 + * @min: driver version minor number
4002 + * @build: driver version build number
4003 + * @sub: driver version sub build number
4004 + *
4005 + * Sends driver version number to firmware through the manageability
4006 + * block. On success return IXGBE_SUCCESS
4007 + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4008 + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4009 + **/
4010 +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4011 + u8 build, u8 sub)
4012 +{
4013 + struct ixgbe_hic_drv_info fw_cmd;
4014 + int i;
4015 + s32 ret_val = IXGBE_SUCCESS;
4016 +
4017 + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4018 +
4019 + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4020 + != IXGBE_SUCCESS) {
4021 + ret_val = IXGBE_ERR_SWFW_SYNC;
4022 + goto out;
4023 + }
4024 +
4025 + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4026 + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4027 + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4028 + fw_cmd.port_num = (u8)hw->bus.func;
4029 + fw_cmd.ver_maj = maj;
4030 + fw_cmd.ver_min = min;
4031 + fw_cmd.ver_build = build;
4032 + fw_cmd.ver_sub = sub;
4033 + fw_cmd.hdr.checksum = 0;
4034 + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4035 + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4036 + fw_cmd.pad = 0;
4037 + fw_cmd.pad2 = 0;
4038 +
4039 + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4040 + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4041 + sizeof(fw_cmd));
4042 + if (ret_val != IXGBE_SUCCESS)
4043 + continue;
4044 +
4045 + if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4046 + FW_CEM_RESP_STATUS_SUCCESS)
4047 + ret_val = IXGBE_SUCCESS;
4048 + else
4049 + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4050 +
4051 + break;
4052 + }
4053 +
4054 + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4055 +out:
4056 + return ret_val;
4057 +}
4058 +
4059 +/**
4060 + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4061 + * @hw: pointer to hardware structure
4062 + * @num_pb: number of packet buffers to allocate
4063 + * @headroom: reserve n KB of headroom
4064 + * @strategy: packet buffer allocation strategy
4065 + **/
4066 +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4067 + int strategy)
4068 +{
4069 + u32 pbsize = hw->mac.rx_pb_size;
4070 + int i = 0;
4071 + u32 rxpktsize, txpktsize, txpbthresh;
4072 +
4073 + /* Reserve headroom */
4074 + pbsize -= headroom;
4075 +
4076 + if (!num_pb)
4077 + num_pb = 1;
4078 +
4079 + /* Divide remaining packet buffer space amongst the number of packet
4080 + * buffers requested using supplied strategy.
4081 + */
4082 + switch (strategy) {
4083 + case PBA_STRATEGY_WEIGHTED:
4084 + /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4085 + * buffer with 5/8 of the packet buffer space.
4086 + */
4087 + rxpktsize = (pbsize * 5) / (num_pb * 4);
4088 + pbsize -= rxpktsize * (num_pb / 2);
4089 + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4090 + for (; i < (num_pb / 2); i++)
4091 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4092 + /* Fall through to configure remaining packet buffers */
4093 + case PBA_STRATEGY_EQUAL:
4094 + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4095 + for (; i < num_pb; i++)
4096 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4097 + break;
4098 + default:
4099 + break;
4100 + }
4101 +
4102 + /* Only support an equally distributed Tx packet buffer strategy. */
4103 + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4104 + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4105 + for (i = 0; i < num_pb; i++) {
4106 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4107 + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4108 + }
4109 +
 4110 +	/* Clear unused TCs, if any, to zero the buffer size */
4111 + for (; i < IXGBE_MAX_PB; i++) {
4112 + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4113 + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4114 + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4115 + }
4116 +}
4117 +
4118 +/**
4119 + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4120 + * @hw: pointer to the hardware structure
4121 + *
4122 + * The 82599 and x540 MACs can experience issues if TX work is still pending
4123 + * when a reset occurs. This function prevents this by flushing the PCIe
4124 + * buffers on the system.
4125 + **/
4126 +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4127 +{
4128 + u32 gcr_ext, hlreg0;
4129 +
4130 + /*
4131 + * If double reset is not requested then all transactions should
4132 + * already be clear and as such there is no work to do
4133 + */
4134 + if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4135 + return;
4136 +
4137 + /*
4138 + * Set loopback enable to prevent any transmits from being sent
4139 + * should the link come up. This assumes that the RXCTRL.RXEN bit
4140 + * has already been cleared.
4141 + */
4142 + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4143 + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4144 +
4145 + /* initiate cleaning flow for buffers in the PCIe transaction layer */
4146 + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4147 + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4148 + gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4149 +
4150 + /* Flush all writes and allow 20usec for all transactions to clear */
4151 + IXGBE_WRITE_FLUSH(hw);
4152 + usec_delay(20);
4153 +
4154 + /* restore previous register values */
4155 + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4156 + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4157 +}
4158 +
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX