1 /******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
34
35 #include "ixgbe_common.h"
36 #include "ixgbe_phy.h"
37 #include "ixgbe_api.h"
38
39 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
45 u16 count);
46 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
50
51 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
53 u16 *san_mac_offset);
54 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 u16 words, u16 *data);
58 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
59 u16 offset);
60
61 /**
62 * ixgbe_init_ops_generic - Inits function ptrs
63 * @hw: pointer to the hardware structure
64 *
65 * Initialize the function pointers.
66 **/
67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
68 {
69 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 struct ixgbe_mac_info *mac = &hw->mac;
71 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
72
73 DEBUGFUNC("ixgbe_init_ops_generic");
74
75 /* EEPROM */
76 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
77 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 if (eec & IXGBE_EEC_PRES) {
79 eeprom->ops.read = &ixgbe_read_eerd_generic;
80 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
81 } else {
82 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 eeprom->ops.read_buffer =
84 &ixgbe_read_eeprom_buffer_bit_bang_generic;
85 }
86 eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 eeprom->ops.validate_checksum =
89 &ixgbe_validate_eeprom_checksum_generic;
90 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
92
93 /* MAC */
94 mac->ops.init_hw = &ixgbe_init_hw_generic;
95 mac->ops.reset_hw = NULL;
96 mac->ops.start_hw = &ixgbe_start_hw_generic;
97 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 mac->ops.get_media_type = NULL;
99 mac->ops.get_supported_physical_layer = NULL;
100 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
107
108 /* LEDs */
109 mac->ops.led_on = &ixgbe_led_on_generic;
110 mac->ops.led_off = &ixgbe_led_off_generic;
111 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
113
114 /* RAR, Multicast, VLAN */
115 mac->ops.set_rar = &ixgbe_set_rar_generic;
116 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
117 mac->ops.insert_mac_addr = NULL;
118 mac->ops.set_vmdq = NULL;
119 mac->ops.clear_vmdq = NULL;
120 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
125 mac->ops.clear_vfta = NULL;
126 mac->ops.set_vfta = NULL;
127 mac->ops.set_vlvf = NULL;
128 mac->ops.init_uta_tables = NULL;
129
130 /* Flow Control */
131 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
132
133 /* Link */
134 mac->ops.get_link_capabilities = NULL;
135 mac->ops.setup_link = NULL;
136 mac->ops.check_link = NULL;
137
138 return IXGBE_SUCCESS;
139 }
140
141 /**
142 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
143 * control
144 * @hw: pointer to hardware structure
145 *
146 * There are several phys that do not support autoneg flow control. This
147 * function check the device id to see if the associated phy supports
148 * autoneg flow control.
149 **/
150 s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
151 {
152
153 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
154
155 switch (hw->device_id) {
156 case IXGBE_DEV_ID_82599_T3_LOM:
157 case IXGBE_DEV_ID_X540T:
158 return IXGBE_SUCCESS;
159 default:
160 return IXGBE_ERR_FC_NOT_SUPPORTED;
161 }
162 }
163
164 /**
165 * ixgbe_setup_fc - Set up flow control
166 * @hw: pointer to hardware structure
167 *
168 * Called at init time to set up flow control.
169 **/
170 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
171 {
172 s32 ret_val = IXGBE_SUCCESS;
173 u32 reg = 0, reg_bp = 0;
174 u16 reg_cu = 0;
175 bool got_lock = FALSE;
176
177 DEBUGFUNC("ixgbe_setup_fc");
178
179 /*
180 * Validate the requested mode. Strict IEEE mode does not allow
181 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
182 */
183 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
184 DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
185 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
186 goto out;
187 }
188
189 /*
190 * 10gig parts do not have a word in the EEPROM to determine the
191 * default flow control setting, so we explicitly set it to full.
192 */
193 if (hw->fc.requested_mode == ixgbe_fc_default)
194 hw->fc.requested_mode = ixgbe_fc_full;
195
196 /*
197 * Set up the 1G and 10G flow control advertisement registers so the
198 * HW will be able to do fc autoneg once the cable is plugged in. If
199 * we link at 10G, the 1G advertisement is harmless and vice versa.
200 */
201 switch (hw->phy.media_type) {
202 case ixgbe_media_type_fiber_fixed:
203 case ixgbe_media_type_fiber:
204 case ixgbe_media_type_backplane:
205 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
206 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
207 break;
208 case ixgbe_media_type_copper:
209 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
210 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
211 break;
212 default:
213 break;
214 }
215
216 /*
217 * The possible values of fc.requested_mode are:
218 * 0: Flow control is completely disabled
219 * 1: Rx flow control is enabled (we can receive pause frames,
220 * but not send pause frames).
221 * 2: Tx flow control is enabled (we can send pause frames but
222 * we do not support receiving pause frames).
223 * 3: Both Rx and Tx flow control (symmetric) are enabled.
224 * other: Invalid.
225 */
226 switch (hw->fc.requested_mode) {
227 case ixgbe_fc_none:
228 /* Flow control completely disabled by software override. */
229 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
230 if (hw->phy.media_type == ixgbe_media_type_backplane)
231 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
232 IXGBE_AUTOC_ASM_PAUSE);
233 else if (hw->phy.media_type == ixgbe_media_type_copper)
234 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
235 break;
236 case ixgbe_fc_tx_pause:
237 /*
238 * Tx Flow control is enabled, and Rx Flow control is
239 * disabled by software override.
240 */
241 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
242 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
243 if (hw->phy.media_type == ixgbe_media_type_backplane) {
244 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
245 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
246 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
247 reg_cu |= IXGBE_TAF_ASM_PAUSE;
248 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
249 }
250 break;
251 case ixgbe_fc_rx_pause:
252 /*
253 * Rx Flow control is enabled and Tx Flow control is
254 * disabled by software override. Since there really
255 * isn't a way to advertise that we are capable of RX
256 * Pause ONLY, we will advertise that we support both
257 * symmetric and asymmetric Rx PAUSE, as such we fall
258 * through to the fc_full statement. Later, we will
259 * disable the adapter's ability to send PAUSE frames.
260 */
261 case ixgbe_fc_full:
262 /* Flow control (both Rx and Tx) is enabled by SW override. */
263 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
264 if (hw->phy.media_type == ixgbe_media_type_backplane)
265 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
266 IXGBE_AUTOC_ASM_PAUSE;
267 else if (hw->phy.media_type == ixgbe_media_type_copper)
268 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
269 break;
270 default:
271 DEBUGOUT("Flow control param set incorrectly\n");
272 ret_val = IXGBE_ERR_CONFIG;
273 goto out;
274 }
275
276 if (hw->mac.type != ixgbe_mac_X540) {
277 /*
278 * Enable auto-negotiation between the MAC & PHY;
279 * the MAC will advertise clause 37 flow control.
280 */
281 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
282 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
283
284 /* Disable AN timeout */
285 if (hw->fc.strict_ieee)
286 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
287
288 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
289 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
290 }
291
292 /*
293 * AUTOC restart handles negotiation of 1G and 10G on backplane
294 * and copper. There is no need to set the PCS1GCTL register.
295 *
296 */
297 if (hw->phy.media_type == ixgbe_media_type_backplane) {
298 reg_bp |= IXGBE_AUTOC_AN_RESTART;
299 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
300 * LESM is on, likewise reset_pipeline requries the lock as
301 * it also writes AUTOC.
302 */
303 if ((hw->mac.type == ixgbe_mac_82599EB) &&
304 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
305 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
306 IXGBE_GSSR_MAC_CSR_SM);
307 if (ret_val != IXGBE_SUCCESS) {
308 ret_val = IXGBE_ERR_SWFW_SYNC;
309 goto out;
310 }
311 got_lock = TRUE;
312 }
313
314 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
315 if (hw->mac.type == ixgbe_mac_82599EB)
316 (void) ixgbe_reset_pipeline_82599(hw);
317
318 if (got_lock)
319 hw->mac.ops.release_swfw_sync(hw,
320 IXGBE_GSSR_MAC_CSR_SM);
321 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
322 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
323 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
324 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
325 }
326
327 DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
328 out:
329 return ret_val;
330 }
331
332 /**
333 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
334 * @hw: pointer to hardware structure
335 *
336 * Starts the hardware by filling the bus info structure and media type, clears
337 * all on chip counters, initializes receive address registers, multicast
338 * table, VLAN filter table, calls routine to set up link and flow control
339 * settings, and leaves transmit and receive units disabled and uninitialized
340 **/
341 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
342 {
343 s32 ret_val;
344 u32 ctrl_ext;
345
346 DEBUGFUNC("ixgbe_start_hw_generic");
347
348 /* Set the media type */
349 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
350
351 /* PHY ops initialization must be done in reset_hw() */
352
353 /* Clear the VLAN filter table */
354 hw->mac.ops.clear_vfta(hw);
355
356 /* Clear statistics registers */
357 hw->mac.ops.clear_hw_cntrs(hw);
358
359 /* Set No Snoop Disable */
360 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
361 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
362 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
363 IXGBE_WRITE_FLUSH(hw);
364
365 /* Setup flow control */
366 ret_val = ixgbe_setup_fc(hw);
367 if (ret_val != IXGBE_SUCCESS)
368 goto out;
369
370 /* Clear adapter stopped flag */
371 hw->adapter_stopped = FALSE;
372
373 out:
374 return ret_val;
375 }
376
377 /**
378 * ixgbe_start_hw_gen2 - Init sequence for common device family
379 * @hw: pointer to hw structure
380 *
381 * Performs the init sequence common to the second generation
382 * of 10 GbE devices.
383 * Devices in the second generation:
384 * 82599
385 * X540
386 **/
387 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
388 {
389 u32 i;
390 u32 regval;
391
392 /* Clear the rate limiters */
393 for (i = 0; i < hw->mac.max_tx_queues; i++) {
394 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
395 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
396 }
397 IXGBE_WRITE_FLUSH(hw);
398
399 /* Disable relaxed ordering */
400 for (i = 0; i < hw->mac.max_tx_queues; i++) {
401 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
402 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
403 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
404 }
405
406 for (i = 0; i < hw->mac.max_rx_queues; i++) {
407 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
408 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
409 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
410 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
411 }
412
413 return IXGBE_SUCCESS;
414 }
415
416 /**
417 * ixgbe_init_hw_generic - Generic hardware initialization
418 * @hw: pointer to hardware structure
419 *
420 * Initialize the hardware by resetting the hardware, filling the bus info
421 * structure and media type, clears all on chip counters, initializes receive
422 * address registers, multicast table, VLAN filter table, calls routine to set
423 * up link and flow control settings, and leaves transmit and receive units
424 * disabled and uninitialized
425 **/
426 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
427 {
428 s32 status;
429
430 DEBUGFUNC("ixgbe_init_hw_generic");
431
432 /* Reset the hardware */
433 status = hw->mac.ops.reset_hw(hw);
434
435 if (status == IXGBE_SUCCESS) {
436 /* Start the HW */
437 status = hw->mac.ops.start_hw(hw);
438 }
439
440 return status;
441 }
442
/**
 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 * @hw: pointer to hardware structure
 *
 * Clears all hardware statistics counters by reading them from the hardware.
 * Statistics counters are clear on read, so every read below is discarded
 * with a (void) cast — the side effect of the read is the point.
 * Branches on mac.type select the per-generation register layouts.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");

	/* Receive error counters */
	(void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	(void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	(void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
	(void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
	/* Missed-packet counters, one per packet buffer */
	for (i = 0; i < 8; i++)
		(void) IXGBE_READ_REG(hw, IXGBE_MPC(i));

	(void) IXGBE_READ_REG(hw, IXGBE_MLFC);
	(void) IXGBE_READ_REG(hw, IXGBE_MRFC);
	(void) IXGBE_READ_REG(hw, IXGBE_RLEC);
	/* Link-level flow control XON/XOFF counters; 82599+ moved the
	 * Rx counters to new register offsets. */
	(void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	(void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		(void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		(void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		(void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		(void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Priority (per-TC) flow control XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		(void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		(void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			(void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			(void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			(void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			(void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	/* XON-to-XOFF transition counters exist only on 82599+ */
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			(void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	/* Packet-size histogram and good/broadcast/multicast counters */
	(void) IXGBE_READ_REG(hw, IXGBE_PRC64);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC127);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC255);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC511);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
	(void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
	(void) IXGBE_READ_REG(hw, IXGBE_GPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_BPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_MPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_GPTC);
	/* 64-bit octet counters: read low then high halves */
	(void) IXGBE_READ_REG(hw, IXGBE_GORCL);
	(void) IXGBE_READ_REG(hw, IXGBE_GORCH);
	(void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
	(void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
	/* Per-buffer no-buffer counters exist only on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		for (i = 0; i < 8; i++)
			(void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	(void) IXGBE_READ_REG(hw, IXGBE_RUC);
	(void) IXGBE_READ_REG(hw, IXGBE_RFC);
	(void) IXGBE_READ_REG(hw, IXGBE_ROC);
	(void) IXGBE_READ_REG(hw, IXGBE_RJC);
	/* Management-traffic counters */
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	(void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	(void) IXGBE_READ_REG(hw, IXGBE_TORL);
	(void) IXGBE_READ_REG(hw, IXGBE_TORH);
	(void) IXGBE_READ_REG(hw, IXGBE_TPR);
	(void) IXGBE_READ_REG(hw, IXGBE_TPT);
	/* Transmit packet-size histogram */
	(void) IXGBE_READ_REG(hw, IXGBE_PTC64);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC127);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC255);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC511);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
	(void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
	(void) IXGBE_READ_REG(hw, IXGBE_MPTC);
	(void) IXGBE_READ_REG(hw, IXGBE_BPTC);
	/* Per-queue counters; 82599+ added split byte counters and a
	 * receive-drop counter */
	for (i = 0; i < 16; i++) {
		(void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		(void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			(void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			(void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional PCS error counters in the PHY; reads go
	 * through the MDIO interface.  `i` is reused as a scratch output. */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			(void) ixgbe_identify_phy(hw);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
	}

	return IXGBE_SUCCESS;
}
556
/**
 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.  Two NVM layouts are
 * handled: the modern one, where word IXGBE_PBANUM0_PTR holds the guard
 * value and IXGBE_PBANUM1_PTR points at a length-prefixed string block,
 * and the legacy one, where the two words are the raw PBA encoded as hex
 * nibbles (rendered here as "XXXXXX-0XX").
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	DEBUGFUNC("ixgbe_read_pba_string_generic");

	if (pba_num == NULL) {
		DEBUGOUT("PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		DEBUGOUT("NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			DEBUGOUT("PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr; nibbles are
		 * stored raw first, then converted to ASCII below */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		/* index 7 is a literal zero nibble: the conversion loop
		 * below turns it into the character '0' */
		pba_num[7] = 0;
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char; '-' (0x2D)
		 * is >= 0x10 so neither branch touches it */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return IXGBE_SUCCESS;
	}

	/* modern format: first word of the block is its length in words */
	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		DEBUGOUT("NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		DEBUGOUT("NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough: (length - 1) data words
	 * expand to 2*(length - 1) chars plus a terminating NUL */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		DEBUGOUT("PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	/* each EEPROM word holds two string bytes, high byte first */
	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			DEBUGOUT("NVM Read Error\n");
			return ret_val;
		}
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return IXGBE_SUCCESS;
}
667
668 /**
669 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
670 * @hw: pointer to hardware structure
671 * @pba_num: stores the part number from the EEPROM
672 *
673 * Reads the part number from the EEPROM.
674 **/
675 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
676 {
677 s32 ret_val;
678 u16 data;
679
680 DEBUGFUNC("ixgbe_read_pba_num_generic");
681
682 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
683 if (ret_val) {
684 DEBUGOUT("NVM Read Error\n");
685 return ret_val;
686 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
687 DEBUGOUT("NVM Not supported\n");
688 return IXGBE_NOT_IMPLEMENTED;
689 }
690 *pba_num = (u32)(data << 16);
691
692 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
693 if (ret_val) {
694 DEBUGOUT("NVM Read Error\n");
695 return ret_val;
696 }
697 *pba_num |= data;
698
699 return IXGBE_SUCCESS;
700 }
701
702 /**
703 * ixgbe_read_pba_raw
704 * @hw: pointer to the HW structure
705 * @eeprom_buf: optional pointer to EEPROM image
706 * @eeprom_buf_size: size of EEPROM image in words
707 * @max_pba_block_size: PBA block size limit
708 * @pba: pointer to output PBA structure
709 *
710 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
711 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
712 *
713 **/
714 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
715 u32 eeprom_buf_size, u16 max_pba_block_size,
716 struct ixgbe_pba *pba)
717 {
718 s32 ret_val;
719 u16 pba_block_size;
720
721 if (pba == NULL)
722 return IXGBE_ERR_PARAM;
723
724 if (eeprom_buf == NULL) {
725 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
726 &pba->word[0]);
727 if (ret_val)
728 return ret_val;
729 } else {
730 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
731 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
732 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
733 } else {
734 return IXGBE_ERR_PARAM;
735 }
736 }
737
738 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
739 if (pba->pba_block == NULL)
740 return IXGBE_ERR_PARAM;
741
742 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
743 eeprom_buf_size,
744 &pba_block_size);
745 if (ret_val)
746 return ret_val;
747
748 if (pba_block_size > max_pba_block_size)
749 return IXGBE_ERR_PARAM;
750
751 if (eeprom_buf == NULL) {
752 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
753 pba_block_size,
754 pba->pba_block);
755 if (ret_val)
756 return ret_val;
757 } else {
758 if (eeprom_buf_size > (u32)(pba->word[1] +
759 pba->pba_block[0])) {
760 (void) memcpy(pba->pba_block,
761 &eeprom_buf[pba->word[1]],
762 pba_block_size * sizeof(u16));
763 } else {
764 return IXGBE_ERR_PARAM;
765 }
766 }
767 }
768
769 return IXGBE_SUCCESS;
770 }
771
772 /**
773 * ixgbe_write_pba_raw
774 * @hw: pointer to the HW structure
775 * @eeprom_buf: optional pointer to EEPROM image
776 * @eeprom_buf_size: size of EEPROM image in words
777 * @pba: pointer to PBA structure
778 *
779 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
780 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
781 *
782 **/
783 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
784 u32 eeprom_buf_size, struct ixgbe_pba *pba)
785 {
786 s32 ret_val;
787
788 if (pba == NULL)
789 return IXGBE_ERR_PARAM;
790
791 if (eeprom_buf == NULL) {
792 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
793 &pba->word[0]);
794 if (ret_val)
795 return ret_val;
796 } else {
797 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
798 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
799 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
800 } else {
801 return IXGBE_ERR_PARAM;
802 }
803 }
804
805 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
806 if (pba->pba_block == NULL)
807 return IXGBE_ERR_PARAM;
808
809 if (eeprom_buf == NULL) {
810 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
811 pba->pba_block[0],
812 pba->pba_block);
813 if (ret_val)
814 return ret_val;
815 } else {
816 if (eeprom_buf_size > (u32)(pba->word[1] +
817 pba->pba_block[0])) {
818 (void) memcpy(&eeprom_buf[pba->word[1]],
819 pba->pba_block,
820 pba->pba_block[0] * sizeof(u16));
821 } else {
822 return IXGBE_ERR_PARAM;
823 }
824 }
825 }
826
827 return IXGBE_SUCCESS;
828 }
829
830 /**
831 * ixgbe_get_pba_block_size
832 * @hw: pointer to the HW structure
833 * @eeprom_buf: optional pointer to EEPROM image
834 * @eeprom_buf_size: size of EEPROM image in words
835 * @pba_data_size: pointer to output variable
836 *
837 * Returns the size of the PBA block in words. Function operates on EEPROM
838 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
839 * EEPROM device.
840 *
841 **/
842 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
843 u32 eeprom_buf_size, u16 *pba_block_size)
844 {
845 s32 ret_val;
846 u16 pba_word[2];
847 u16 length;
848
849 DEBUGFUNC("ixgbe_get_pba_block_size");
850
851 if (eeprom_buf == NULL) {
852 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
853 &pba_word[0]);
854 if (ret_val)
855 return ret_val;
856 } else {
857 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
858 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
859 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
860 } else {
861 return IXGBE_ERR_PARAM;
862 }
863 }
864
865 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
866 if (eeprom_buf == NULL) {
867 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
868 &length);
869 if (ret_val)
870 return ret_val;
871 } else {
872 if (eeprom_buf_size > pba_word[1])
873 length = eeprom_buf[pba_word[1] + 0];
874 else
875 return IXGBE_ERR_PARAM;
876 }
877
878 if (length == 0xFFFF || length == 0)
879 return IXGBE_ERR_PBA_SECTION;
880 } else {
881 /* PBA number in legacy format, there is no PBA Block. */
882 length = 0;
883 }
884
885 if (pba_block_size != NULL)
886 *pba_block_size = length;
887
888 return IXGBE_SUCCESS;
889 }
890
891 /**
892 * ixgbe_get_mac_addr_generic - Generic get MAC address
893 * @hw: pointer to hardware structure
894 * @mac_addr: Adapter MAC address
895 *
896 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
897 * A reset of the adapter must be performed prior to calling this function
898 * in order for the MAC address to have been loaded from the EEPROM into RAR0
899 **/
900 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
901 {
902 u32 rar_high;
903 u32 rar_low;
904 u16 i;
905
906 DEBUGFUNC("ixgbe_get_mac_addr_generic");
907
908 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
909 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
910
911 for (i = 0; i < 4; i++)
912 mac_addr[i] = (u8)(rar_low >> (i*8));
913
914 for (i = 0; i < 2; i++)
915 mac_addr[i+4] = (u8)(rar_high >> (i*8));
916
917 return IXGBE_SUCCESS;
918 }
919
920 /**
921 * ixgbe_get_bus_info_generic - Generic set PCI bus info
922 * @hw: pointer to hardware structure
923 *
924 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
925 **/
926 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
927 {
928 struct ixgbe_mac_info *mac = &hw->mac;
929 u16 link_status;
930
931 DEBUGFUNC("ixgbe_get_bus_info_generic");
932
933 hw->bus.type = ixgbe_bus_type_pci_express;
934
935 /* Get the negotiated link width and speed from PCI config space */
936 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
937
938 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
939 case IXGBE_PCI_LINK_WIDTH_1:
940 hw->bus.width = ixgbe_bus_width_pcie_x1;
941 break;
942 case IXGBE_PCI_LINK_WIDTH_2:
943 hw->bus.width = ixgbe_bus_width_pcie_x2;
944 break;
945 case IXGBE_PCI_LINK_WIDTH_4:
946 hw->bus.width = ixgbe_bus_width_pcie_x4;
947 break;
948 case IXGBE_PCI_LINK_WIDTH_8:
949 hw->bus.width = ixgbe_bus_width_pcie_x8;
950 break;
951 default:
952 hw->bus.width = ixgbe_bus_width_unknown;
953 break;
954 }
955
956 switch (link_status & IXGBE_PCI_LINK_SPEED) {
957 case IXGBE_PCI_LINK_SPEED_2500:
958 hw->bus.speed = ixgbe_bus_speed_2500;
959 break;
960 case IXGBE_PCI_LINK_SPEED_5000:
961 hw->bus.speed = ixgbe_bus_speed_5000;
962 break;
963 case IXGBE_PCI_LINK_SPEED_8000:
964 hw->bus.speed = ixgbe_bus_speed_8000;
965 break;
966 default:
967 hw->bus.speed = ixgbe_bus_speed_unknown;
968 break;
969 }
970
971 mac->ops.set_lan_id(hw);
972
973 return IXGBE_SUCCESS;
974 }
975
976 /**
977 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
978 * @hw: pointer to the HW structure
979 *
980 * Determines the LAN function id by reading memory-mapped registers
981 * and swaps the port value if requested.
982 **/
983 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
984 {
985 struct ixgbe_bus_info *bus = &hw->bus;
986 u32 reg;
987
988 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
989
990 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
991 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
992 bus->lan_id = bus->func;
993
994 /* check for a port swap */
995 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
996 if (reg & IXGBE_FACTPS_LFS)
997 bus->func ^= 0x1;
998 }
999
/**
 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct.  Clears interrupts,
 * disables transmit and receive units.  The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 *
 * NOTE(review): the shutdown order below (flag, Rx off, IRQs masked and
 * drained, per-queue Tx/Rx disable, flush, settle delay, PCIe-master
 * disable) looks deliberate — preserve it when modifying.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 reg_val;
	u16 i;

	DEBUGFUNC("ixgbe_stop_adapter_generic");

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = TRUE;

	/* Disable the receive unit */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts, flush previous writes; reading
	 * EICR acknowledges outstanding causes */
	(void) IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < hw->mac.max_tx_queues; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue; SWFLSH drains
	 * any descriptors still in flight */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		reg_val &= ~IXGBE_RXDCTL_ENABLE;
		reg_val |= IXGBE_RXDCTL_SWFLSH;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
	}

	/* flush all queues disables, then give the hardware time to settle */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return ixgbe_disable_pcie_master(hw);
}
1053
1054 /**
1055 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1056 * @hw: pointer to hardware structure
1057 * @index: led number to turn on
1058 **/
1059 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1060 {
1061 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1062
1063 DEBUGFUNC("ixgbe_led_on_generic");
1064
1065 /* To turn on the LED, set mode to ON. */
1066 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1067 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1068 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1069 IXGBE_WRITE_FLUSH(hw);
1070
1071 return IXGBE_SUCCESS;
1072 }
1073
1074 /**
1075 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1076 * @hw: pointer to hardware structure
1077 * @index: led number to turn off
1078 **/
1079 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1080 {
1081 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1082
1083 DEBUGFUNC("ixgbe_led_off_generic");
1084
1085 /* To turn off the LED, set mode to OFF. */
1086 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1087 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1088 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1089 IXGBE_WRITE_FLUSH(hw);
1090
1091 return IXGBE_SUCCESS;
1092 }
1093
1094 /**
1095 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1096 * @hw: pointer to hardware structure
1097 *
1098 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1099 * ixgbe_hw struct in order to set up EEPROM access.
1100 **/
1101 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1102 {
1103 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1104 u32 eec;
1105 u16 eeprom_size;
1106
1107 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1108
1109 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1110 eeprom->type = ixgbe_eeprom_none;
1111 /* Set default semaphore delay to 10ms which is a well
1112 * tested value */
1113 eeprom->semaphore_delay = 10;
1114 /* Clear EEPROM page size, it will be initialized as needed */
1115 eeprom->word_page_size = 0;
1116
1117 /*
1118 * Check for EEPROM present first.
1119 * If not present leave as none
1120 */
1121 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1122 if (eec & IXGBE_EEC_PRES) {
1123 eeprom->type = ixgbe_eeprom_spi;
1124
1125 /*
1126 * SPI EEPROM is assumed here. This code would need to
1127 * change if a future EEPROM is not SPI.
1128 */
1129 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1130 IXGBE_EEC_SIZE_SHIFT);
1131 eeprom->word_size = 1 << (eeprom_size +
1132 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1133 }
1134
1135 if (eec & IXGBE_EEC_ADDR_SIZE)
1136 eeprom->address_bits = 16;
1137 else
1138 eeprom->address_bits = 8;
1139 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1140 "%d\n", eeprom->type, eeprom->word_size,
1141 eeprom->address_bits);
1142 }
1143
1144 return IXGBE_SUCCESS;
1145 }
1146
1147 /**
1148 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1149 * @hw: pointer to hardware structure
1150 * @offset: offset within the EEPROM to write
1151 * @words: number of word(s)
1152 * @data: 16 bit word(s) to write to EEPROM
1153 *
1154 * Reads 16 bit word(s) from EEPROM through bit-bang method
1155 **/
1156 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1157 u16 words, u16 *data)
1158 {
1159 s32 status = IXGBE_SUCCESS;
1160 u16 i, count;
1161
1162 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1163
1164 hw->eeprom.ops.init_params(hw);
1165
1166 if (words == 0) {
1167 status = IXGBE_ERR_INVALID_ARGUMENT;
1168 goto out;
1169 }
1170
1171 if (offset + words > hw->eeprom.word_size) {
1172 status = IXGBE_ERR_EEPROM;
1173 goto out;
1174 }
1175
1176 /*
1177 * The EEPROM page size cannot be queried from the chip. We do lazy
1178 * initialization. It is worth to do that when we write large buffer.
1179 */
1180 if ((hw->eeprom.word_page_size == 0) &&
1181 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1182 status = ixgbe_detect_eeprom_page_size_generic(hw, offset);
1183 if (status != IXGBE_SUCCESS)
1184 goto out;
1185
1186 /*
1187 * We cannot hold synchronization semaphores for too long
1188 * to avoid other entity starvation. However it is more efficient
1189 * to read in bursts than synchronizing access for each word.
1190 */
1191 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1192 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1193 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1194 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1195 count, &data[i]);
1196
1197 if (status != IXGBE_SUCCESS)
1198 break;
1199 }
1200
1201 out:
1202 return status;
1203 }
1204
1205 /**
1206 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1207 * @hw: pointer to hardware structure
1208 * @offset: offset within the EEPROM to be written to
1209 * @words: number of word(s)
1210 * @data: 16 bit word(s) to be written to the EEPROM
1211 *
1212 * If ixgbe_eeprom_update_checksum is not called after this function, the
1213 * EEPROM will most likely contain an invalid checksum.
1214 **/
1215 static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1216 u16 words, u16 *data)
1217 {
1218 s32 status;
1219 u16 word;
1220 u16 page_size;
1221 u16 i;
1222 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1223
1224 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1225
1226 /* Prepare the EEPROM for writing */
1227 status = ixgbe_acquire_eeprom(hw);
1228
1229 if (status == IXGBE_SUCCESS) {
1230 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1231 ixgbe_release_eeprom(hw);
1232 status = IXGBE_ERR_EEPROM;
1233 }
1234 }
1235
1236 if (status == IXGBE_SUCCESS) {
1237 for (i = 0; i < words; i++) {
1238 ixgbe_standby_eeprom(hw);
1239
1240 /* Send the WRITE ENABLE command (8 bit opcode ) */
1241 ixgbe_shift_out_eeprom_bits(hw,
1242 IXGBE_EEPROM_WREN_OPCODE_SPI,
1243 IXGBE_EEPROM_OPCODE_BITS);
1244
1245 ixgbe_standby_eeprom(hw);
1246
1247 /*
1248 * Some SPI eeproms use the 8th address bit embedded
1249 * in the opcode
1250 */
1251 if ((hw->eeprom.address_bits == 8) &&
1252 ((offset + i) >= 128))
1253 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1254
1255 /* Send the Write command (8-bit opcode + addr) */
1256 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1257 IXGBE_EEPROM_OPCODE_BITS);
1258 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1259 hw->eeprom.address_bits);
1260
1261 page_size = hw->eeprom.word_page_size;
1262
1263 /* Send the data in burst via SPI*/
1264 do {
1265 word = data[i];
1266 word = (word >> 8) | (word << 8);
1267 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1268
1269 if (page_size == 0)
1270 break;
1271
1272 /* do not wrap around page */
1273 if (((offset + i) & (page_size - 1)) ==
1274 (page_size - 1))
1275 break;
1276 } while (++i < words);
1277
1278 ixgbe_standby_eeprom(hw);
1279 msec_delay(10);
1280 }
1281 /* Done with writing - release the EEPROM */
1282 ixgbe_release_eeprom(hw);
1283 }
1284
1285 return status;
1286 }
1287
1288 /**
1289 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1290 * @hw: pointer to hardware structure
1291 * @offset: offset within the EEPROM to be written to
1292 * @data: 16 bit word to be written to the EEPROM
1293 *
1294 * If ixgbe_eeprom_update_checksum is not called after this function, the
1295 * EEPROM will most likely contain an invalid checksum.
1296 **/
1297 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1298 {
1299 s32 status;
1300
1301 DEBUGFUNC("ixgbe_write_eeprom_generic");
1302
1303 hw->eeprom.ops.init_params(hw);
1304
1305 if (offset >= hw->eeprom.word_size) {
1306 status = IXGBE_ERR_EEPROM;
1307 goto out;
1308 }
1309
1310 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1311
1312 out:
1313 return status;
1314 }
1315
1316 /**
1317 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1318 * @hw: pointer to hardware structure
1319 * @offset: offset within the EEPROM to be read
1320 * @data: read 16 bit words(s) from EEPROM
1321 * @words: number of word(s)
1322 *
1323 * Reads 16 bit word(s) from EEPROM through bit-bang method
1324 **/
1325 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1326 u16 words, u16 *data)
1327 {
1328 s32 status = IXGBE_SUCCESS;
1329 u16 i, count;
1330
1331 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1332
1333 hw->eeprom.ops.init_params(hw);
1334
1335 if (words == 0) {
1336 status = IXGBE_ERR_INVALID_ARGUMENT;
1337 goto out;
1338 }
1339
1340 if (offset + words > hw->eeprom.word_size) {
1341 status = IXGBE_ERR_EEPROM;
1342 goto out;
1343 }
1344
1345 /*
1346 * We cannot hold synchronization semaphores for too long
1347 * to avoid other entity starvation. However it is more efficient
1348 * to read in bursts than synchronizing access for each word.
1349 */
1350 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1351 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1352 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1353
1354 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1355 count, &data[i]);
1356
1357 if (status != IXGBE_SUCCESS)
1358 break;
1359 }
1360
1361 out:
1362 return status;
1363 }
1364
1365 /**
1366 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1367 * @hw: pointer to hardware structure
1368 * @offset: offset within the EEPROM to be read
1369 * @words: number of word(s)
1370 * @data: read 16 bit word(s) from EEPROM
1371 *
1372 * Reads 16 bit word(s) from EEPROM through bit-bang method
1373 **/
1374 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1375 u16 words, u16 *data)
1376 {
1377 s32 status;
1378 u16 word_in;
1379 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1380 u16 i;
1381
1382 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1383
1384 /* Prepare the EEPROM for reading */
1385 status = ixgbe_acquire_eeprom(hw);
1386
1387 if (status == IXGBE_SUCCESS) {
1388 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1389 ixgbe_release_eeprom(hw);
1390 status = IXGBE_ERR_EEPROM;
1391 }
1392 }
1393
1394 if (status == IXGBE_SUCCESS) {
1395 for (i = 0; i < words; i++) {
1396 ixgbe_standby_eeprom(hw);
1397 /*
1398 * Some SPI eeproms use the 8th address bit embedded
1399 * in the opcode
1400 */
1401 if ((hw->eeprom.address_bits == 8) &&
1402 ((offset + i) >= 128))
1403 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1404
1405 /* Send the READ command (opcode + addr) */
1406 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1407 IXGBE_EEPROM_OPCODE_BITS);
1408 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1409 hw->eeprom.address_bits);
1410
1411 /* Read the data. */
1412 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1413 data[i] = (word_in >> 8) | (word_in << 8);
1414 }
1415
1416 /* End this read operation */
1417 ixgbe_release_eeprom(hw);
1418 }
1419
1420 return status;
1421 }
1422
1423 /**
1424 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1425 * @hw: pointer to hardware structure
1426 * @offset: offset within the EEPROM to be read
1427 * @data: read 16 bit value from EEPROM
1428 *
1429 * Reads 16 bit value from EEPROM through bit-bang method
1430 **/
1431 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1432 u16 *data)
1433 {
1434 s32 status;
1435
1436 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1437
1438 hw->eeprom.ops.init_params(hw);
1439
1440 if (offset >= hw->eeprom.word_size) {
1441 status = IXGBE_ERR_EEPROM;
1442 goto out;
1443 }
1444
1445 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1446
1447 out:
1448 return status;
1449 }
1450
1451 /**
1452 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1453 * @hw: pointer to hardware structure
1454 * @offset: offset of word in the EEPROM to read
1455 * @words: number of word(s)
1456 * @data: 16 bit word(s) from the EEPROM
1457 *
1458 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1459 **/
1460 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1461 u16 words, u16 *data)
1462 {
1463 u32 eerd;
1464 s32 status = IXGBE_SUCCESS;
1465 u32 i;
1466
1467 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1468
1469 hw->eeprom.ops.init_params(hw);
1470
1471 if (words == 0) {
1472 status = IXGBE_ERR_INVALID_ARGUMENT;
1473 goto out;
1474 }
1475
1476 if (offset >= hw->eeprom.word_size) {
1477 status = IXGBE_ERR_EEPROM;
1478 goto out;
1479 }
1480
1481 for (i = 0; i < words; i++) {
1482 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1483 IXGBE_EEPROM_RW_REG_START;
1484
1485 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1486 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1487
1488 if (status == IXGBE_SUCCESS) {
1489 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1490 IXGBE_EEPROM_RW_REG_DATA);
1491 } else {
1492 DEBUGOUT("Eeprom read timed out\n");
1493 goto out;
1494 }
1495 }
1496 out:
1497 return status;
1498 }
1499
1500 /**
1501 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1502 * @hw: pointer to hardware structure
1503 * @offset: offset within the EEPROM to be used as a scratch pad
1504 *
1505 * Discover EEPROM page size by writing marching data at given offset.
1506 * This function is called only when we are writing a new large buffer
1507 * at given offset so the data would be overwritten anyway.
1508 **/
1509 static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1510 u16 offset)
1511 {
1512 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1513 s32 status = IXGBE_SUCCESS;
1514 u16 i;
1515
1516 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1517
1518 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1519 data[i] = i;
1520
1521 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1522 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1523 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1524 hw->eeprom.word_page_size = 0;
1525 if (status != IXGBE_SUCCESS)
1526 goto out;
1527
1528 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1529 if (status != IXGBE_SUCCESS)
1530 goto out;
1531
1532 /*
1533 * When writing in burst more than the actual page size
1534 * EEPROM address wraps around current page.
1535 */
1536 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1537
1538 DEBUGOUT1("Detected EEPROM page size = %d words.",
1539 hw->eeprom.word_page_size);
1540 out:
1541 return status;
1542 }
1543
1544 /**
1545 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1546 * @hw: pointer to hardware structure
1547 * @offset: offset of word in the EEPROM to read
1548 * @data: word read from the EEPROM
1549 *
1550 * Reads a 16 bit word from the EEPROM using the EERD register.
1551 **/
1552 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1553 {
1554 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1555 }
1556
1557 /**
1558 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1559 * @hw: pointer to hardware structure
1560 * @offset: offset of word in the EEPROM to write
1561 * @words: number of word(s)
1562 * @data: word(s) write to the EEPROM
1563 *
1564 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1565 **/
1566 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1567 u16 words, u16 *data)
1568 {
1569 u32 eewr;
1570 s32 status = IXGBE_SUCCESS;
1571 u16 i;
1572
1573 DEBUGFUNC("ixgbe_write_eewr_generic");
1574
1575 hw->eeprom.ops.init_params(hw);
1576
1577 if (words == 0) {
1578 status = IXGBE_ERR_INVALID_ARGUMENT;
1579 goto out;
1580 }
1581
1582 if (offset >= hw->eeprom.word_size) {
1583 status = IXGBE_ERR_EEPROM;
1584 goto out;
1585 }
1586
1587 for (i = 0; i < words; i++) {
1588 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1589 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1590 IXGBE_EEPROM_RW_REG_START;
1591
1592 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1593 if (status != IXGBE_SUCCESS) {
1594 DEBUGOUT("Eeprom write EEWR timed out\n");
1595 goto out;
1596 }
1597
1598 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1599
1600 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1601 if (status != IXGBE_SUCCESS) {
1602 DEBUGOUT("Eeprom write EEWR timed out\n");
1603 goto out;
1604 }
1605 }
1606
1607 out:
1608 return status;
1609 }
1610
1611 /**
1612 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1613 * @hw: pointer to hardware structure
1614 * @offset: offset of word in the EEPROM to write
1615 * @data: word write to the EEPROM
1616 *
1617 * Write a 16 bit word to the EEPROM using the EEWR register.
1618 **/
1619 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1620 {
1621 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1622 }
1623
1624 /**
1625 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1626 * @hw: pointer to hardware structure
1627 * @ee_reg: EEPROM flag for polling
1628 *
1629 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1630 * read or write is done respectively.
1631 **/
1632 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1633 {
1634 u32 i;
1635 u32 reg;
1636 s32 status = IXGBE_ERR_EEPROM;
1637
1638 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1639
1640 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1641 if (ee_reg == IXGBE_NVM_POLL_READ)
1642 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1643 else
1644 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1645
1646 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1647 status = IXGBE_SUCCESS;
1648 break;
1649 }
1650 usec_delay(5);
1651 }
1652 return status;
1653 }
1654
1655 /**
1656 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1657 * @hw: pointer to hardware structure
1658 *
1659 * Prepares EEPROM for access using bit-bang method. This function should
1660 * be called before issuing a command to the EEPROM.
1661 **/
1662 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1663 {
1664 s32 status = IXGBE_SUCCESS;
1665 u32 eec;
1666 u32 i;
1667
1668 DEBUGFUNC("ixgbe_acquire_eeprom");
1669
1670 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1671 != IXGBE_SUCCESS)
1672 status = IXGBE_ERR_SWFW_SYNC;
1673
1674 if (status == IXGBE_SUCCESS) {
1675 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1676
1677 /* Request EEPROM Access */
1678 eec |= IXGBE_EEC_REQ;
1679 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1680
1681 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1682 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1683 if (eec & IXGBE_EEC_GNT)
1684 break;
1685 usec_delay(5);
1686 }
1687
1688 /* Release if grant not acquired */
1689 if (!(eec & IXGBE_EEC_GNT)) {
1690 eec &= ~IXGBE_EEC_REQ;
1691 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1692 DEBUGOUT("Could not acquire EEPROM grant\n");
1693
1694 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1695 status = IXGBE_ERR_EEPROM;
1696 }
1697
1698 /* Setup EEPROM for Read/Write */
1699 if (status == IXGBE_SUCCESS) {
1700 /* Clear CS and SK */
1701 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1702 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1703 IXGBE_WRITE_FLUSH(hw);
1704 usec_delay(1);
1705 }
1706 }
1707 return status;
1708 }
1709
1710 /**
1711 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1712 * @hw: pointer to hardware structure
1713 *
1714 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1715 **/
1716 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1717 {
1718 s32 status = IXGBE_ERR_EEPROM;
1719 u32 timeout = 2000;
1720 u32 i;
1721 u32 swsm;
1722
1723 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1724
1725
1726 /* Get SMBI software semaphore between device drivers first */
1727 for (i = 0; i < timeout; i++) {
1728 /*
1729 * If the SMBI bit is 0 when we read it, then the bit will be
1730 * set and we have the semaphore
1731 */
1732 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1733 if (!(swsm & IXGBE_SWSM_SMBI)) {
1734 status = IXGBE_SUCCESS;
1735 break;
1736 }
1737 usec_delay(50);
1738 }
1739
1740 if (i == timeout) {
1741 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1742 "not granted.\n");
1743 /*
1744 * this release is particularly important because our attempts
1745 * above to get the semaphore may have succeeded, and if there
1746 * was a timeout, we should unconditionally clear the semaphore
1747 * bits to free the driver to make progress
1748 */
1749 ixgbe_release_eeprom_semaphore(hw);
1750
1751 usec_delay(50);
1752 /*
1753 * one last try
1754 * If the SMBI bit is 0 when we read it, then the bit will be
1755 * set and we have the semaphore
1756 */
1757 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1758 if (!(swsm & IXGBE_SWSM_SMBI))
1759 status = IXGBE_SUCCESS;
1760 }
1761
1762 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1763 if (status == IXGBE_SUCCESS) {
1764 for (i = 0; i < timeout; i++) {
1765 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1766
1767 /* Set the SW EEPROM semaphore bit to request access */
1768 swsm |= IXGBE_SWSM_SWESMBI;
1769 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1770
1771 /*
1772 * If we set the bit successfully then we got the
1773 * semaphore.
1774 */
1775 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1776 if (swsm & IXGBE_SWSM_SWESMBI)
1777 break;
1778
1779 usec_delay(50);
1780 }
1781
1782 /*
1783 * Release semaphores and return error if SW EEPROM semaphore
1784 * was not granted because we don't have access to the EEPROM
1785 */
1786 if (i >= timeout) {
1787 DEBUGOUT("SWESMBI Software EEPROM semaphore "
1788 "not granted.\n");
1789 ixgbe_release_eeprom_semaphore(hw);
1790 status = IXGBE_ERR_EEPROM;
1791 }
1792 } else {
1793 DEBUGOUT("Software semaphore SMBI between device drivers "
1794 "not granted.\n");
1795 }
1796
1797 return status;
1798 }
1799
1800 /**
1801 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1802 * @hw: pointer to hardware structure
1803 *
1804 * This function clears hardware semaphore bits.
1805 **/
1806 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1807 {
1808 u32 swsm;
1809
1810 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1811
1812 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1813
1814 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1815 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1816 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1817 IXGBE_WRITE_FLUSH(hw);
1818 }
1819
1820 /**
1821 * ixgbe_ready_eeprom - Polls for EEPROM ready
1822 * @hw: pointer to hardware structure
1823 **/
1824 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1825 {
1826 s32 status = IXGBE_SUCCESS;
1827 u16 i;
1828 u8 spi_stat_reg;
1829
1830 DEBUGFUNC("ixgbe_ready_eeprom");
1831
1832 /*
1833 * Read "Status Register" repeatedly until the LSB is cleared. The
1834 * EEPROM will signal that the command has been completed by clearing
1835 * bit 0 of the internal status register. If it's not cleared within
1836 * 5 milliseconds, then error out.
1837 */
1838 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1839 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1840 IXGBE_EEPROM_OPCODE_BITS);
1841 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1842 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1843 break;
1844
1845 usec_delay(5);
1846 ixgbe_standby_eeprom(hw);
1847 };
1848
1849 /*
1850 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1851 * devices (and only 0-5mSec on 5V devices)
1852 */
1853 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1854 DEBUGOUT("SPI EEPROM Status error\n");
1855 status = IXGBE_ERR_EEPROM;
1856 }
1857
1858 return status;
1859 }
1860
1861 /**
1862 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1863 * @hw: pointer to hardware structure
1864 **/
1865 static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1866 {
1867 u32 eec;
1868
1869 DEBUGFUNC("ixgbe_standby_eeprom");
1870
1871 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1872
1873 /* Toggle CS to flush commands */
1874 eec |= IXGBE_EEC_CS;
1875 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1876 IXGBE_WRITE_FLUSH(hw);
1877 usec_delay(1);
1878 eec &= ~IXGBE_EEC_CS;
1879 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1880 IXGBE_WRITE_FLUSH(hw);
1881 usec_delay(1);
1882 }
1883
1884 /**
1885 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1886 * @hw: pointer to hardware structure
1887 * @data: data to send to the EEPROM
1888 * @count: number of bits to shift out
1889 **/
1890 static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1891 u16 count)
1892 {
1893 u32 eec;
1894 u32 mask;
1895 u32 i;
1896
1897 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1898
1899 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1900
1901 /*
1902 * Mask is used to shift "count" bits of "data" out to the EEPROM
1903 * one bit at a time. Determine the starting bit based on count
1904 */
1905 mask = 0x01 << (count - 1);
1906
1907 for (i = 0; i < count; i++) {
1908 /*
1909 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1910 * "1", and then raising and then lowering the clock (the SK
1911 * bit controls the clock input to the EEPROM). A "0" is
1912 * shifted out to the EEPROM by setting "DI" to "0" and then
1913 * raising and then lowering the clock.
1914 */
1915 if (data & mask)
1916 eec |= IXGBE_EEC_DI;
1917 else
1918 eec &= ~IXGBE_EEC_DI;
1919
1920 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1921 IXGBE_WRITE_FLUSH(hw);
1922
1923 usec_delay(1);
1924
1925 ixgbe_raise_eeprom_clk(hw, &eec);
1926 ixgbe_lower_eeprom_clk(hw, &eec);
1927
1928 /*
1929 * Shift mask to signify next bit of data to shift in to the
1930 * EEPROM
1931 */
1932 mask = mask >> 1;
1933 };
1934
1935 /* We leave the "DI" bit set to "0" when we leave this routine. */
1936 eec &= ~IXGBE_EEC_DI;
1937 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1938 IXGBE_WRITE_FLUSH(hw);
1939 }
1940
1941 /**
1942 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1943 * @hw: pointer to hardware structure
1944 **/
1945 static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1946 {
1947 u32 eec;
1948 u32 i;
1949 u16 data = 0;
1950
1951 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1952
1953 /*
1954 * In order to read a register from the EEPROM, we need to shift
1955 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1956 * the clock input to the EEPROM (setting the SK bit), and then reading
1957 * the value of the "DO" bit. During this "shifting in" process the
1958 * "DI" bit should always be clear.
1959 */
1960 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1961
1962 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1963
1964 for (i = 0; i < count; i++) {
1965 data = data << 1;
1966 ixgbe_raise_eeprom_clk(hw, &eec);
1967
1968 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1969
1970 eec &= ~(IXGBE_EEC_DI);
1971 if (eec & IXGBE_EEC_DO)
1972 data |= 1;
1973
1974 ixgbe_lower_eeprom_clk(hw, &eec);
1975 }
1976
1977 return data;
1978 }
1979
1980 /**
1981 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1982 * @hw: pointer to hardware structure
1983 * @eec: EEC register's current value
1984 **/
1985 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1986 {
1987 DEBUGFUNC("ixgbe_raise_eeprom_clk");
1988
1989 /*
1990 * Raise the clock input to the EEPROM
1991 * (setting the SK bit), then delay
1992 */
1993 *eec = *eec | IXGBE_EEC_SK;
1994 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
1995 IXGBE_WRITE_FLUSH(hw);
1996 usec_delay(1);
1997 }
1998
1999 /**
2000 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2001 * @hw: pointer to hardware structure
2002 * @eecd: EECD's current value
2003 **/
2004 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2005 {
2006 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2007
2008 /*
2009 * Lower the clock input to the EEPROM (clearing the SK bit), then
2010 * delay
2011 */
2012 *eec = *eec & ~IXGBE_EEC_SK;
2013 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2014 IXGBE_WRITE_FLUSH(hw);
2015 usec_delay(1);
2016 }
2017
2018 /**
2019 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2020 * @hw: pointer to hardware structure
2021 **/
2022 static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2023 {
2024 u32 eec;
2025
2026 DEBUGFUNC("ixgbe_release_eeprom");
2027
2028 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2029
2030 eec |= IXGBE_EEC_CS; /* Pull CS high */
2031 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2032
2033 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2034 IXGBE_WRITE_FLUSH(hw);
2035
2036 usec_delay(1);
2037
2038 /* Stop requesting EEPROM access */
2039 eec &= ~IXGBE_EEC_REQ;
2040 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2041
2042 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2043
2044 /* Delay before attempt to obtain semaphore again to allow FW access */
2045 msec_delay(hw->eeprom.semaphore_delay);
2046 }
2047
2048 /**
2049 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2050 * @hw: pointer to hardware structure
2051 **/
2052 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2053 {
2054 u16 i;
2055 u16 j;
2056 u16 checksum = 0;
2057 u16 length = 0;
2058 u16 pointer = 0;
2059 u16 word = 0;
2060
2061 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2062
2063 /* Include 0x0-0x3F in the checksum */
2064 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2065 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2066 DEBUGOUT("EEPROM read failed\n");
2067 break;
2068 }
2069 checksum += word;
2070 }
2071
2072 /* Include all data from pointers except for the fw pointer */
2073 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2074 hw->eeprom.ops.read(hw, i, &pointer);
2075
2076 /* Make sure the pointer seems valid */
2077 if (pointer != 0xFFFF && pointer != 0) {
2078 hw->eeprom.ops.read(hw, pointer, &length);
2079
2080 if (length != 0xFFFF && length != 0) {
2081 for (j = pointer+1; j <= pointer+length; j++) {
2082 hw->eeprom.ops.read(hw, j, &word);
2083 checksum += word;
2084 }
2085 }
2086 }
2087 }
2088
2089 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2090
2091 return checksum;
2092 }
2093
2094 /**
2095 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2096 * @hw: pointer to hardware structure
2097 * @checksum_val: calculated checksum
2098 *
2099 * Performs checksum calculation and validates the EEPROM checksum. If the
2100 * caller does not need checksum_val, the value can be NULL.
2101 **/
2102 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2103 u16 *checksum_val)
2104 {
2105 s32 status;
2106 u16 checksum;
2107 u16 read_checksum = 0;
2108
2109 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2110
2111 /*
2112 * Read the first word from the EEPROM. If this times out or fails, do
2113 * not continue or we could be in for a very long wait while every
2114 * EEPROM read fails
2115 */
2116 status = hw->eeprom.ops.read(hw, 0, &checksum);
2117
2118 if (status == IXGBE_SUCCESS) {
2119 checksum = hw->eeprom.ops.calc_checksum(hw);
2120
2121 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2122
2123 /*
2124 * Verify read checksum from EEPROM is the same as
2125 * calculated checksum
2126 */
2127 if (read_checksum != checksum)
2128 status = IXGBE_ERR_EEPROM_CHECKSUM;
2129
2130 /* If the user cares, return the calculated checksum */
2131 if (checksum_val)
2132 *checksum_val = checksum;
2133 } else {
2134 DEBUGOUT("EEPROM read failed\n");
2135 }
2136
2137 return status;
2138 }
2139
2140 /**
2141 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2142 * @hw: pointer to hardware structure
2143 **/
2144 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2145 {
2146 s32 status;
2147 u16 checksum;
2148
2149 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2150
2151 /*
2152 * Read the first word from the EEPROM. If this times out or fails, do
2153 * not continue or we could be in for a very long wait while every
2154 * EEPROM read fails
2155 */
2156 status = hw->eeprom.ops.read(hw, 0, &checksum);
2157
2158 if (status == IXGBE_SUCCESS) {
2159 checksum = hw->eeprom.ops.calc_checksum(hw);
2160 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2161 checksum);
2162 } else {
2163 DEBUGOUT("EEPROM read failed\n");
2164 }
2165
2166 return status;
2167 }
2168
2169 /**
2170 * ixgbe_validate_mac_addr - Validate MAC address
2171 * @mac_addr: pointer to MAC address.
2172 *
2173 * Tests a MAC address to ensure it is a valid Individual Address
2174 **/
2175 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2176 {
2177 s32 status = IXGBE_SUCCESS;
2178
2179 DEBUGFUNC("ixgbe_validate_mac_addr");
2180
2181 /* Make sure it is not a multicast address */
2182 if (IXGBE_IS_MULTICAST(mac_addr)) {
2183 DEBUGOUT("MAC address is multicast\n");
2184 status = IXGBE_ERR_INVALID_MAC_ADDR;
2185 /* Not a broadcast address */
2186 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2187 DEBUGOUT("MAC address is broadcast\n");
2188 status = IXGBE_ERR_INVALID_MAC_ADDR;
2189 /* Reject the zero address */
2190 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2191 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2192 DEBUGOUT("MAC address is all zeros\n");
2193 status = IXGBE_ERR_INVALID_MAC_ADDR;
2194 }
2195 return status;
2196 }
2197
2198 /**
2199 * ixgbe_set_rar_generic - Set Rx address register
2200 * @hw: pointer to hardware structure
2201 * @index: Receive address register to write
2202 * @addr: Address to put into receive address register
2203 * @vmdq: VMDq "set" or "pool" index
2204 * @enable_addr: set flag that address is active
2205 *
2206 * Puts an ethernet address into a receive address register.
2207 **/
2208 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2209 u32 enable_addr)
2210 {
2211 u32 rar_low, rar_high;
2212 u32 rar_entries = hw->mac.num_rar_entries;
2213
2214 DEBUGFUNC("ixgbe_set_rar_generic");
2215
2216 /* Make sure we are using a valid rar index range */
2217 if (index >= rar_entries) {
2218 DEBUGOUT1("RAR index %d is out of range.\n", index);
2219 return IXGBE_ERR_INVALID_ARGUMENT;
2220 }
2221
2222 /* setup VMDq pool selection before this RAR gets enabled */
2223 hw->mac.ops.set_vmdq(hw, index, vmdq);
2224
2225 /*
2226 * HW expects these in little endian so we reverse the byte
2227 * order from network order (big endian) to little endian
2228 */
2229 rar_low = ((u32)addr[0] |
2230 ((u32)addr[1] << 8) |
2231 ((u32)addr[2] << 16) |
2232 ((u32)addr[3] << 24));
2233 /*
2234 * Some parts put the VMDq setting in the extra RAH bits,
2235 * so save everything except the lower 16 bits that hold part
2236 * of the address and the address valid bit.
2237 */
2238 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2239 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2240 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2241
2242 if (enable_addr != 0)
2243 rar_high |= IXGBE_RAH_AV;
2244
2245 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2246 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2247
2248 return IXGBE_SUCCESS;
2249 }
2250
2251 /**
2252 * ixgbe_clear_rar_generic - Remove Rx address register
2253 * @hw: pointer to hardware structure
2254 * @index: Receive address register to write
2255 *
2256 * Clears an ethernet address from a receive address register.
2257 **/
2258 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2259 {
2260 u32 rar_high;
2261 u32 rar_entries = hw->mac.num_rar_entries;
2262
2263 DEBUGFUNC("ixgbe_clear_rar_generic");
2264
2265 /* Make sure we are using a valid rar index range */
2266 if (index >= rar_entries) {
2267 DEBUGOUT1("RAR index %d is out of range.\n", index);
2268 return IXGBE_ERR_INVALID_ARGUMENT;
2269 }
2270
2271 /*
2272 * Some parts put the VMDq setting in the extra RAH bits,
2273 * so save everything except the lower 16 bits that hold part
2274 * of the address and the address valid bit.
2275 */
2276 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2277 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2278
2279 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2280 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2281
2282 /* clear VMDq pool/queue selection for this RAR */
2283 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2284
2285 return IXGBE_SUCCESS;
2286 }
2287
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 *
 * Returns the result of ixgbe_init_uta_tables() (expected IXGBE_SUCCESS).
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_init_rx_addrs_generic");

	/*
	 * If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);
	} else {
		/* Setup the receive address. */
		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
			  hw->mac.addr[0], hw->mac.addr[1],
			  hw->mac.addr[2]);
		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
			  hw->mac.addr[4], hw->mac.addr[5]);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	}
	/* Reset software filter-overflow accounting */
	hw->addr_ctrl.overflow_promisc = 0;

	/* RAR 0 now holds the station address */
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA; MCSTCTRL keeps the configured filter type only
	 * (multicast filtering itself is not enabled here). */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	DEBUGOUT(" Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	/* Should always be IXGBE_SUCCESS. */
	return ixgbe_init_uta_tables(hw);
}
2354
2355 /**
2356 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2357 * @hw: pointer to hardware structure
2358 * @addr: new address
2359 *
2360 * Adds it to unused receive address register or goes into promiscuous mode.
2361 **/
2362 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2363 {
2364 u32 rar_entries = hw->mac.num_rar_entries;
2365 u32 rar;
2366
2367 DEBUGFUNC("ixgbe_add_uc_addr");
2368
2369 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2370 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2371
2372 /*
2373 * Place this address in the RAR if there is room,
2374 * else put the controller into promiscuous mode
2375 */
2376 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2377 rar = hw->addr_ctrl.rar_used_count;
2378 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2379 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2380 hw->addr_ctrl.rar_used_count++;
2381 } else {
2382 hw->addr_ctrl.overflow_promisc++;
2383 }
2384
2385 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2386 }
2387
/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
				      u32 addr_count, ixgbe_mc_addr_itr next)
{
	u8 *addr;
	u32 i;
	/* Remember the overflow state before the list is rebuilt so we only
	 * toggle FCTRL.UPE on a real transition. */
	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
	u32 uc_addr_in_use;
	u32 fctrl;
	u32 vmdq;

	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

	/*
	 * Clear accounting of old secondary address list,
	 * don't count RAR[0]
	 */
	/* NOTE(review): assumes rar_used_count >= 1 (RAR[0] holds the station
	 * address, set by ixgbe_init_rx_addrs_generic); a value of 0 here
	 * would underflow uc_addr_in_use — TODO confirm callers guarantee it */
	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
	hw->addr_ctrl.overflow_promisc = 0;

	/* Zero out the other receive addresses */
	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
	for (i = 0; i < uc_addr_in_use; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
	}

	/* Add the new addresses; ixgbe_add_uc_addr bumps rar_used_count or
	 * overflow_promisc as capacity allows */
	for (i = 0; i < addr_count; i++) {
		DEBUGOUT(" Adding the secondary addresses:\n");
		addr = next(hw, &addr_list, &vmdq);
		ixgbe_add_uc_addr(hw, addr, vmdq);
	}

	if (hw->addr_ctrl.overflow_promisc) {
		/* enable promisc if not already in overflow or set by user */
		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Entering address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	} else {
		/* only disable if set by overflow, not by user */
		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
			DEBUGOUT(" Leaving address overflow promisc mode\n");
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl &= ~IXGBE_FCTRL_UPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}
	}

	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
2457
2458 /**
2459 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2460 * @hw: pointer to hardware structure
2461 * @mc_addr: the multicast address
2462 *
2463 * Extracts the 12 bits, from a multicast address, to determine which
2464 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2465 * incoming rx multicast addresses, to determine the bit-vector to check in
2466 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2467 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2468 * to mc_filter_type.
2469 **/
2470 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2471 {
2472 u32 vector = 0;
2473
2474 DEBUGFUNC("ixgbe_mta_vector");
2475
2476 switch (hw->mac.mc_filter_type) {
2477 case 0: /* use bits [47:36] of the address */
2478 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2479 break;
2480 case 1: /* use bits [46:35] of the address */
2481 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2482 break;
2483 case 2: /* use bits [45:34] of the address */
2484 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2485 break;
2486 case 3: /* use bits [43:32] of the address */
2487 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2488 break;
2489 default: /* Invalid mc_filter_type */
2490 DEBUGOUT("MC filter type param set incorrectly\n");
2491 ASSERT(0);
2492 break;
2493 }
2494
2495 /* vector can only be 12-bits or boundary will be exceeded */
2496 vector &= 0xFFF;
2497 return vector;
2498 }
2499
2500 /**
2501 * ixgbe_set_mta - Set bit-vector in multicast table
2502 * @hw: pointer to hardware structure
2503 * @hash_value: Multicast address hash value
2504 *
2505 * Sets the bit-vector in the multicast table.
2506 **/
2507 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2508 {
2509 u32 vector;
2510 u32 vector_bit;
2511 u32 vector_reg;
2512
2513 DEBUGFUNC("ixgbe_set_mta");
2514
2515 hw->addr_ctrl.mta_in_use++;
2516
2517 vector = ixgbe_mta_vector(hw, mc_addr);
2518 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2519
2520 /*
2521 * The MTA is a register array of 128 32-bit registers. It is treated
2522 * like an array of 4096 bits. We want to set bit
2523 * BitArray[vector_value]. So we figure out what register the bit is
2524 * in, read it, OR in the new bit, then write back the new value. The
2525 * register is determined by the upper 7 bits of the vector value and
2526 * the bit within that register are determined by the lower 5 bits of
2527 * the value.
2528 */
2529 vector_reg = (vector >> 5) & 0x7F;
2530 vector_bit = vector & 0x1F;
2531 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2532 }
2533
2534 /**
2535 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2536 * @hw: pointer to hardware structure
2537 * @mc_addr_list: the list of new multicast addresses
2538 * @mc_addr_count: number of addresses
2539 * @next: iterator function to walk the multicast address list
2540 * @clear: flag, when set clears the table beforehand
2541 *
2542 * When the clear flag is set, the given list replaces any existing list.
2543 * Hashes the given addresses into the multicast table.
2544 **/
2545 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2546 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2547 bool clear)
2548 {
2549 u32 i;
2550 u32 vmdq;
2551
2552 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2553
2554 /*
2555 * Set the new number of MC addresses that we are being requested to
2556 * use.
2557 */
2558 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2559 hw->addr_ctrl.mta_in_use = 0;
2560
2561 /* Clear mta_shadow */
2562 if (clear) {
2563 DEBUGOUT(" Clearing MTA\n");
2564 (void) memset(&hw->mac.mta_shadow, 0,
2565 sizeof(hw->mac.mta_shadow));
2566 }
2567
2568 /* Update mta_shadow */
2569 for (i = 0; i < mc_addr_count; i++) {
2570 DEBUGOUT(" Adding the multicast addresses:\n");
2571 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2572 }
2573
2574 /* Enable mta */
2575 for (i = 0; i < hw->mac.mcft_size; i++)
2576 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2577 hw->mac.mta_shadow[i]);
2578
2579 if (hw->addr_ctrl.mta_in_use > 0)
2580 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2581 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2582
2583 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2584 return IXGBE_SUCCESS;
2585 }
2586
2587 /**
2588 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2589 * @hw: pointer to hardware structure
2590 *
2591 * Enables multicast address in RAR and the use of the multicast hash table.
2592 **/
2593 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2594 {
2595 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2596
2597 DEBUGFUNC("ixgbe_enable_mc_generic");
2598
2599 if (a->mta_in_use > 0)
2600 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2601 hw->mac.mc_filter_type);
2602
2603 return IXGBE_SUCCESS;
2604 }
2605
2606 /**
2607 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2608 * @hw: pointer to hardware structure
2609 *
2610 * Disables multicast address in RAR and the use of the multicast hash table.
2611 **/
2612 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2613 {
2614 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2615
2616 DEBUGFUNC("ixgbe_disable_mc_generic");
2617
2618 if (a->mta_in_use > 0)
2619 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2620
2621 return IXGBE_SUCCESS;
2622 }
2623
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings: validates the
 * configured water marks, autonegotiates the mode, then programs MFLCN,
 * FCCFG, the per-TC FCRTL/FCRTH thresholds, the pause timers (FCTTV) and
 * the refresh threshold (FCRTV).
 *
 * Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS for a bad
 * pause-time/water-mark configuration, or IXGBE_ERR_CONFIG for an invalid
 * fc.current_mode.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		/* only TCs with Tx pause and a high water mark are checked */
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;	/* discard pause frames after processing */
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);


	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* water marks are stored in KB units; <<10 -> bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the maximum FCRTH value. This allows the Tx
			 * switch to function even under heavy Rx workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;	/* replicate into both halves */
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2753
2754 /**
2755 * ixgbe_negotiate_fc - Negotiate flow control
2756 * @hw: pointer to hardware structure
2757 * @adv_reg: flow control advertised settings
2758 * @lp_reg: link partner's flow control settings
2759 * @adv_sym: symmetric pause bit in advertisement
2760 * @adv_asm: asymmetric pause bit in advertisement
2761 * @lp_sym: symmetric pause bit in link partner advertisement
2762 * @lp_asm: asymmetric pause bit in link partner advertisement
2763 *
2764 * Find the intersection between advertised settings and link partner's
2765 * advertised settings
2766 **/
2767 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2768 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2769 {
2770 if ((!(adv_reg)) || (!(lp_reg)))
2771 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2772
2773 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2774 /*
2775 * Now we need to check if the user selected Rx ONLY
2776 * of pause frames. In this case, we had to advertise
2777 * FULL flow control because we could not advertise RX
2778 * ONLY. Hence, we must now check to see if we need to
2779 * turn OFF the TRANSMISSION of PAUSE frames.
2780 */
2781 if (hw->fc.requested_mode == ixgbe_fc_full) {
2782 hw->fc.current_mode = ixgbe_fc_full;
2783 DEBUGOUT("Flow Control = FULL.\n");
2784 } else {
2785 hw->fc.current_mode = ixgbe_fc_rx_pause;
2786 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2787 }
2788 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2789 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2790 hw->fc.current_mode = ixgbe_fc_tx_pause;
2791 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2792 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2793 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2794 hw->fc.current_mode = ixgbe_fc_rx_pause;
2795 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2796 } else {
2797 hw->fc.current_mode = ixgbe_fc_none;
2798 DEBUGOUT("Flow Control = NONE.\n");
2799 }
2800 return IXGBE_SUCCESS;
2801 }
2802
2803 /**
2804 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2805 * @hw: pointer to hardware structure
2806 *
2807 * Enable flow control according on 1 gig fiber.
2808 **/
2809 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2810 {
2811 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2812 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2813
2814 /*
2815 * On multispeed fiber at 1g, bail out if
2816 * - link is up but AN did not complete, or if
2817 * - link is up and AN completed but timed out
2818 */
2819
2820 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2821 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2822 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2823 goto out;
2824
2825 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2826 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2827
2828 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2829 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2830 IXGBE_PCS1GANA_ASM_PAUSE,
2831 IXGBE_PCS1GANA_SYM_PAUSE,
2832 IXGBE_PCS1GANA_ASM_PAUSE);
2833
2834 out:
2835 return ret_val;
2836 }
2837
2838 /**
2839 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2840 * @hw: pointer to hardware structure
2841 *
2842 * Enable flow control according to IEEE clause 37.
2843 **/
2844 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2845 {
2846 u32 links2, anlp1_reg, autoc_reg, links;
2847 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2848
2849 /*
2850 * On backplane, bail out if
2851 * - backplane autoneg was not completed, or if
2852 * - we are 82599 and link partner is not AN enabled
2853 */
2854 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2855 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2856 goto out;
2857
2858 if (hw->mac.type == ixgbe_mac_82599EB) {
2859 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2860 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2861 goto out;
2862 }
2863 /*
2864 * Read the 10g AN autoc and LP ability registers and resolve
2865 * local flow control settings accordingly
2866 */
2867 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2868 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2869
2870 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2871 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2872 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2873
2874 out:
2875 return ret_val;
2876 }
2877
2878 /**
2879 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2880 * @hw: pointer to hardware structure
2881 *
2882 * Enable flow control according to IEEE clause 37.
2883 **/
2884 static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2885 {
2886 u16 technology_ability_reg = 0;
2887 u16 lp_technology_ability_reg = 0;
2888
2889 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2890 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2891 &technology_ability_reg);
2892 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2893 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2894 &lp_technology_ability_reg);
2895
2896 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2897 (u32)lp_technology_ability_reg,
2898 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2899 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2900 }
2901
2902 /**
2903 * ixgbe_fc_autoneg - Configure flow control
2904 * @hw: pointer to hardware structure
2905 *
2906 * Compares our advertised flow control capabilities to those advertised by
2907 * our link partner, and determines the proper flow control mode to use.
2908 **/
2909 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2910 {
2911 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2912 ixgbe_link_speed speed;
2913 bool link_up;
2914
2915 DEBUGFUNC("ixgbe_fc_autoneg");
2916
2917 /*
2918 * AN should have completed when the cable was plugged in.
2919 * Look for reasons to bail out. Bail out if:
2920 * - FC autoneg is disabled, or if
2921 * - link is not up.
2922 */
2923 if (hw->fc.disable_fc_autoneg)
2924 goto out;
2925
2926 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
2927 if (!link_up)
2928 goto out;
2929
2930 switch (hw->phy.media_type) {
2931 /* Autoneg flow control on fiber adapters */
2932 case ixgbe_media_type_fiber_fixed:
2933 case ixgbe_media_type_fiber:
2934 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2935 ret_val = ixgbe_fc_autoneg_fiber(hw);
2936 break;
2937
2938 /* Autoneg flow control on backplane adapters */
2939 case ixgbe_media_type_backplane:
2940 ret_val = ixgbe_fc_autoneg_backplane(hw);
2941 break;
2942
2943 /* Autoneg flow control on copper adapters */
2944 case ixgbe_media_type_copper:
2945 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2946 ret_val = ixgbe_fc_autoneg_copper(hw);
2947 break;
2948
2949 default:
2950 break;
2951 }
2952
2953 out:
2954 if (ret_val == IXGBE_SUCCESS) {
2955 hw->fc.fc_was_autonegged = TRUE;
2956 } else {
2957 hw->fc.fc_was_autonegged = FALSE;
2958 hw->fc.current_mode = hw->fc.requested_mode;
2959 }
2960 }
2961
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Poll for master request bit to clear (up to timeout * 100us) */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending: poll the Device Status register in PCI
	 * config space.
	 */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
		      IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
3020
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash).
 *
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC if the EEPROM semaphore
 * could not be taken or the resource stayed busy for the full timeout.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	/* firmware's ownership bits sit 5 positions above the SW bits */
	u32 fwmask = mask << 5;
	s32 timeout = 200;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	while (timeout) {
		/*
		 * SW EEPROM semaphore bit is used for access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		/* resource free in both SW and FW bits: claim it below
		 * while still holding the EEPROM semaphore */
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Firmware currently using resource (fwmask) or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_eeprom_semaphore(hw);
		msec_delay(5);
		timeout--;
	}

	if (!timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* mark the resource as owned by software, then drop the
	 * EEPROM semaphore taken in the loop above */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return IXGBE_SUCCESS;
}
3070
3071 /**
3072 * ixgbe_release_swfw_sync - Release SWFW semaphore
3073 * @hw: pointer to hardware structure
3074 * @mask: Mask to specify which semaphore to release
3075 *
3076 * Releases the SWFW semaphore through the GSSR register for the specified
3077 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3078 **/
3079 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3080 {
3081 u32 gssr;
3082 u32 swmask = mask;
3083
3084 DEBUGFUNC("ixgbe_release_swfw_sync");
3085
3086 (void) ixgbe_get_eeprom_semaphore(hw);
3087
3088 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3089 gssr &= ~swmask;
3090 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3091
3092 ixgbe_release_eeprom_semaphore(hw);
3093 }
3094
3095 /**
3096 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3097 * @hw: pointer to hardware structure
3098 *
3099 * Stops the receive data path and waits for the HW to internally empty
3100 * the Rx security block
3101 **/
3102 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3103 {
3104 #define IXGBE_MAX_SECRX_POLL 40
3105
3106 int i;
3107 int secrxreg;
3108
3109 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3110
3111
3112 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3113 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3114 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3115 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3116 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3117 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3118 break;
3119 else
3120 /* Use interrupt-safe sleep just in case */
3121 usec_delay(1000);
3122 }
3123
3124 /* For informational purposes only */
3125 if (i >= IXGBE_MAX_SECRX_POLL)
3126 DEBUGOUT("Rx unit being enabled before security "
3127 "path fully disabled. Continuing with init.\n");
3128
3129 return IXGBE_SUCCESS;
3130 }
3131
3132 /**
3133 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3134 * @hw: pointer to hardware structure
3135 *
3136 * Enables the receive data path.
3137 **/
3138 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3139 {
3140 int secrxreg;
3141
3142 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3143
3144 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3145 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3146 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3147 IXGBE_WRITE_FLUSH(hw);
3148
3149 return IXGBE_SUCCESS;
3150 }
3151
3152 /**
3153 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3154 * @hw: pointer to hardware structure
3155 * @regval: register value to write to RXCTRL
3156 *
3157 * Enables the Rx DMA unit
3158 **/
3159 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3160 {
3161 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3162
3163 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3164
3165 return IXGBE_SUCCESS;
3166 }
3167
3168 /**
3169 * ixgbe_blink_led_start_generic - Blink LED based on index.
3170 * @hw: pointer to hardware structure
3171 * @index: led number to blink
3172 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);

	if (!link_up) {
		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
		 * LESM is on.
		 */
		bool got_lock = FALSE;
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (ret_val != IXGBE_SUCCESS) {
				ret_val = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}
			got_lock = TRUE;
		}

		/* Force link up (FLU) and restart auto-negotiation */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);

		if (got_lock)
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		/* Give the forced link time to settle */
		msec_delay(10);
	}

	/* Select blink mode for the requested LED and latch it */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3224
3225 /**
3226 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3227 * @hw: pointer to hardware structure
3228 * @index: led number to stop blinking
3229 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_blink_led_stop_generic");
	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
	 * LESM is on.
	 */
	if ((hw->mac.type == ixgbe_mac_82599EB) &&
	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
						IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto out;
		}
		got_lock = TRUE;
	}


	/* Drop forced link-up (FLU) and restart auto-negotiation */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* 82599 needs a pipeline reset after an AUTOC change */
	if (hw->mac.type == ixgbe_mac_82599EB)
		(void) ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Stop blinking and return the LED to link/activity mode */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg &= ~IXGBE_LED_BLINK(index);
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3272
3273 /**
3274 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3275 * @hw: pointer to hardware structure
3276 * @san_mac_offset: SAN MAC address offset
3277 *
3278 * This function will read the EEPROM location for the SAN MAC address
3279 * pointer, and returns the value at that location. This is used in both
3280 * get and set mac_addr routines.
3281 **/
3282 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3283 u16 *san_mac_offset)
3284 {
3285 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3286
3287 /*
3288 * First read the EEPROM pointer to see if the MAC addresses are
3289 * available.
3290 */
3291 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
3292
3293 return IXGBE_SUCCESS;
3294 }
3295
3296 /**
3297 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3298 * @hw: pointer to hardware structure
3299 * @san_mac_addr: SAN MAC address
3300 *
3301 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3302 * per-port, so set_lan_id() must be called before reading the addresses.
3303 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3304 * upon for non-SFP connections, so we must call it here.
3305 **/
3306 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3307 {
3308 u16 san_mac_data, san_mac_offset;
3309 u8 i;
3310
3311 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3312
3313 /*
3314 * First read the EEPROM pointer to see if the MAC addresses are
3315 * available. If they're not, no point in calling set_lan_id() here.
3316 */
3317 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3318
3319 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3320 /*
3321 * No addresses available in this EEPROM. It's not an
3322 * error though, so just wipe the local address and return.
3323 */
3324 for (i = 0; i < 6; i++)
3325 san_mac_addr[i] = 0xFF;
3326
3327 goto san_mac_addr_out;
3328 }
3329
3330 /* make sure we know which port we need to program */
3331 hw->mac.ops.set_lan_id(hw);
3332 /* apply the port offset to the address offset */
3333 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3334 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3335 for (i = 0; i < 3; i++) {
3336 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
3337 san_mac_addr[i * 2] = (u8)(san_mac_data);
3338 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3339 san_mac_offset++;
3340 }
3341
3342 san_mac_addr_out:
3343 return IXGBE_SUCCESS;
3344 }
3345
3346 /**
3347 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3348 * @hw: pointer to hardware structure
3349 * @san_mac_addr: SAN MAC address
3350 *
3351 * Write a SAN MAC address to the EEPROM.
3352 **/
3353 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3354 {
3355 s32 status = IXGBE_SUCCESS;
3356 u16 san_mac_data, san_mac_offset;
3357 u8 i;
3358
3359 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3360
3361 /* Look for SAN mac address pointer. If not defined, return */
3362 (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3363
3364 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
3365 status = IXGBE_ERR_NO_SAN_ADDR_PTR;
3366 goto san_mac_addr_out;
3367 }
3368
3369 /* Make sure we know which port we need to write */
3370 hw->mac.ops.set_lan_id(hw);
3371 /* Apply the port offset to the address offset */
3372 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3373 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3374
3375 for (i = 0; i < 3; i++) {
3376 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3377 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3378 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3379 san_mac_offset++;
3380 }
3381
3382 san_mac_addr_out:
3383 return status;
3384 }
3385
3386 /**
3387 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3388 * @hw: pointer to hardware structure
3389 *
3390 * Read PCIe configuration space, and get the MSI-X vector count from
3391 * the capabilities table.
3392 **/
3393 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3394 {
3395 u16 msix_count = 1;
3396 u16 max_msix_count;
3397 u16 pcie_offset;
3398
3399 switch (hw->mac.type) {
3400 case ixgbe_mac_82598EB:
3401 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3402 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3403 break;
3404 case ixgbe_mac_82599EB:
3405 case ixgbe_mac_X540:
3406 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3407 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3408 break;
3409 default:
3410 return msix_count;
3411 }
3412
3413 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3414 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3415 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3416
3417 /* MSI-X count is zero-based in HW */
3418 msix_count++;
3419
3420 if (msix_count > max_msix_count)
3421 msix_count = max_msix_count;
3422
3423 return msix_count;
3424 }
3425
3426 /**
3427 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3428 * @hw: pointer to hardware structure
3429 * @addr: Address to put into receive address register
3430 * @vmdq: VMDq pool to assign
3431 *
3432 * Puts an ethernet address into a receive address register, or
3433 * finds the rar that it is aleady in; adds to the pool list
3434 **/
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	DEBUGFUNC("ixgbe_insert_mac_addr_generic");

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
			   | (addr[2] << 16)
			   | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* Remember the first disabled (AV clear) slot seen */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break;	/* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		(void) ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		(void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		(void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		(void) ixgbe_clear_vmdq(hw, rar, 0);

	/* On success the RAR index used (>= 0) is the return value */
	return rar;
}
3494
3495 /**
3496 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3497 * @hw: pointer to hardware struct
3498 * @rar: receive address register index to disassociate
3499 * @vmdq: VMDq pool index to remove from the rar
3500 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_clear_vmdq_generic");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* The 64-bit pool bitmap for this RAR spans two 32-bit registers */
	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* No pool associated with this RAR: nothing to clear */
	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		/* Drop every pool association at once */
		if (mpsar_lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* Clear one pool bit in the low half */
		mpsar_lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	} else {
		/* Clear one pool bit in the high half */
		mpsar_hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);
done:
	return IXGBE_SUCCESS;
}
3543
3544 /**
3545 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3546 * @hw: pointer to hardware struct
3547 * @rar: receive address register index to associate with a VMDq index
3548 * @vmdq: VMDq pool index
3549 **/
3550 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3551 {
3552 u32 mpsar;
3553 u32 rar_entries = hw->mac.num_rar_entries;
3554
3555 DEBUGFUNC("ixgbe_set_vmdq_generic");
3556
3557 /* Make sure we are using a valid rar index range */
3558 if (rar >= rar_entries) {
3559 DEBUGOUT1("RAR index %d is out of range.\n", rar);
3560 return IXGBE_ERR_INVALID_ARGUMENT;
3561 }
3562
3563 if (vmdq < 32) {
3564 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3565 mpsar |= 1 << vmdq;
3566 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3567 } else {
3568 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3569 mpsar |= 1 << (vmdq - 32);
3570 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3571 }
3572 return IXGBE_SUCCESS;
3573 }
3574
3575 /**
3576 * This function should only be involved in the IOV mode.
3577 * In IOV mode, Default pool is next pool after the number of
3578 * VFs advertized and not 0.
3579 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3580 *
3581 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3582 * @hw: pointer to hardware struct
3583 * @vmdq: VMDq pool index
3584 **/
3585 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3586 {
3587 u32 rar = hw->mac.san_mac_rar_index;
3588
3589 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3590
3591 if (vmdq < 32) {
3592 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3593 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3594 } else {
3595 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3596 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3597 }
3598
3599 return IXGBE_SUCCESS;
3600 }
3601
3602 /**
3603 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3604 * @hw: pointer to hardware structure
3605 **/
3606 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3607 {
3608 int i;
3609
3610 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3611 DEBUGOUT(" Clearing UTA\n");
3612
3613 for (i = 0; i < 128; i++)
3614 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3615
3616 return IXGBE_SUCCESS;
3617 }
3618
3619 /**
3620 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3621 * @hw: pointer to hardware structure
3622 * @vlan: VLAN id to write to VLAN filter
3623 *
3624 * return the VLVF index where this VLAN id should be placed
3625 *
3626 **/
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
	u32 bits = 0;
	u32 first_empty_slot = 0;	/* 0 doubles as "none found" (slot 0 is reserved) */
	s32 regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/*
	 * Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	/* Slot 0 is skipped: it is reserved for VLAN 0 above */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)	/* low 12 bits hold the VLAN id */
			break;
	}

	/*
	 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
	 * in the VLVF. Else use the first empty VLVF register for this
	 * vlan id.
	 */
	if (regindex >= IXGBE_VLVF_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else {
			DEBUGOUT("No space in VLVF.\n");
			regindex = IXGBE_ERR_NO_SPACE;
		}
	}

	/* Returns a slot index >= 0, or the negative IXGBE_ERR_NO_SPACE */
	return regindex;
}
3665
3666 /**
3667 * ixgbe_set_vfta_generic - Set VLAN filter table
3668 * @hw: pointer to hardware structure
3669 * @vlan: VLAN id to write to VLAN filter
3670 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3671 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3672 *
3673 * Turn on/off specified VLAN in the VLAN filter table.
3674 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 targetbit;
	s32 ret_val = IXGBE_SUCCESS;
	bool vfta_changed = FALSE;

	DEBUGFUNC("ixgbe_set_vfta_generic");

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* Only mark the VFTA changed when the bit actually flips */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = TRUE;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = TRUE;
		}
	}

	/* Part 2
	 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	 */
	/* May clear vfta_changed if other pools still use this VLAN */
	ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
					 &vfta_changed);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return IXGBE_SUCCESS;
}
3732
3733 /**
3734 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3735 * @hw: pointer to hardware structure
3736 * @vlan: VLAN id to write to VLAN filter
3737 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3738 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3739 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3740 * should be changed
3741 *
3742 * Turn on/off specified bit in VLVF table.
3743 **/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool *vfta_changed)
{
	u32 vt;

	DEBUGFUNC("ixgbe_set_vlvf_generic");

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;
		u32 bits;

		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit */
			/* each VLVF slot has two VLVFB registers (64 pool bits) */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits |= (1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
			}
		} else {
			/* clear the pool bit */
			/* after clearing, OR in the other half so "bits"
			 * reflects ALL 64 pool bits for the emptiness
			 * test below */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index * 2),
						bits);
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
			} else {
				bits = IXGBE_READ_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1));
				bits &= ~(1 << (vind - 32));
				IXGBE_WRITE_REG(hw,
					IXGBE_VLVFB((vlvf_index * 2) + 1),
					bits);
				bits |= IXGBE_READ_REG(hw,
					IXGBE_VLVFB(vlvf_index * 2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if ((!vlan_on) && (vfta_changed != NULL)) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				*vfta_changed = FALSE;
			}
		} else
			/* last user gone: free the VLVF slot entirely */
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	return IXGBE_SUCCESS;
}
3840
3841 /**
3842 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3843 * @hw: pointer to hardware structure
3844 *
3845 * Clears the VLAN filer table, and the VMDq index associated with the filter
3846 **/
3847 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3848 {
3849 u32 offset;
3850
3851 DEBUGFUNC("ixgbe_clear_vfta_generic");
3852
3853 for (offset = 0; offset < hw->mac.vft_size; offset++)
3854 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3855
3856 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3857 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3858 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3859 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3860 }
3861
3862 return IXGBE_SUCCESS;
3863 }
3864
3865 /**
3866 * ixgbe_check_mac_link_generic - Determine link and speed status
3867 * @hw: pointer to hardware structure
3868 * @speed: pointer to link speed
3869 * @link_up: TRUE when link is up
3870 * @link_up_wait_to_complete: bool used to wait for link up or not
3871 *
3872 * Reads the links register to determine if link is up and the current speed
3873 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	DEBUGFUNC("ixgbe_check_mac_link_generic");

	/* clear the old state */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* Back-to-back reads differing indicates link state in flux */
	if (links_orig != links_reg) {
		DEBUGOUT2("LINKS changed from %08X to %08X\n",
			  links_orig, links_reg);
	}

	if (link_up_wait_to_complete) {
		/* Poll in 100 ms steps, up to IXGBE_LINK_UP_TIME tries */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		/* Single non-blocking check */
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/*
	 * Decode the speed field.  NOTE(review): the 82599-named masks are
	 * applied for all MAC types here — presumably the LINKS encoding is
	 * shared across supported parts; confirm for non-82599 MACs.
	 */
	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	    IXGBE_LINKS_SPEED_10G_82599)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_1G_82599)
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
		 IXGBE_LINKS_SPEED_100_82599)
		*speed = IXGBE_LINK_SPEED_100_FULL;
	else
		*speed = IXGBE_LINK_SPEED_UNKNOWN;

	return IXGBE_SUCCESS;
}
3924
3925 /**
3926 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3927 * the EEPROM
3928 * @hw: pointer to hardware structure
3929 * @wwnn_prefix: the alternative WWNN prefix
3930 * @wwpn_prefix: the alternative WWPN prefix
3931 *
3932 * This function will read the EEPROM from the alternative SAN MAC address
3933 * block to check the support for the alternative WWNN/WWPN prefix support.
3934 **/
3935 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3936 u16 *wwpn_prefix)
3937 {
3938 u16 offset, caps;
3939 u16 alt_san_mac_blk_offset;
3940
3941 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3942
3943 /* clear output first */
3944 *wwnn_prefix = 0xFFFF;
3945 *wwpn_prefix = 0xFFFF;
3946
3947 /* check if alternative SAN MAC is supported */
3948 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
3949 &alt_san_mac_blk_offset);
3950
3951 if ((alt_san_mac_blk_offset == 0) ||
3952 (alt_san_mac_blk_offset == 0xFFFF))
3953 goto wwn_prefix_out;
3954
3955 /* check capability in alternative san mac address block */
3956 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3957 hw->eeprom.ops.read(hw, offset, &caps);
3958 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3959 goto wwn_prefix_out;
3960
3961 /* get the corresponding prefix for WWNN/WWPN */
3962 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3963 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
3964
3965 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3966 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
3967
3968 wwn_prefix_out:
3969 return IXGBE_SUCCESS;
3970 }
3971
3972 /**
3973 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
3974 * @hw: pointer to hardware structure
3975 * @bs: the fcoe boot status
3976 *
3977 * This function will read the FCOE boot status from the iSCSI FCOE block
3978 **/
3979 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
3980 {
3981 u16 offset, caps, flags;
3982 s32 status;
3983
3984 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
3985
3986 /* clear output first */
3987 *bs = ixgbe_fcoe_bootstatus_unavailable;
3988
3989 /* check if FCOE IBA block is present */
3990 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
3991 status = hw->eeprom.ops.read(hw, offset, &caps);
3992 if (status != IXGBE_SUCCESS)
3993 goto out;
3994
3995 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
3996 goto out;
3997
3998 /* check if iSCSI FCOE block is populated */
3999 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4000 if (status != IXGBE_SUCCESS)
4001 goto out;
4002
4003 if ((offset == 0) || (offset == 0xFFFF))
4004 goto out;
4005
4006 /* read fcoe flags in iSCSI FCOE block */
4007 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4008 status = hw->eeprom.ops.read(hw, offset, &flags);
4009 if (status != IXGBE_SUCCESS)
4010 goto out;
4011
4012 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4013 *bs = ixgbe_fcoe_bootstatus_enabled;
4014 else
4015 *bs = ixgbe_fcoe_bootstatus_disabled;
4016
4017 out:
4018 return status;
4019 }
4020
4021 /**
4022 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4023 * @hw: pointer to hardware structure
4024 * @enable: enable or disable switch for anti-spoofing
4025 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4026 *
4027 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;	/* PFVFSPOOF register holding the PF's bit */
	int pf_target_shift = pf % 8;	/* bit position of the PF within that register */
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	/* Fully (dis)arm every register wholly below the PF's register */
	for (j = 0; j < pf_target_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Do not set the bits assigned to the PF
	 */
	/* Keep only the bits for pools below the PF in the PF's register */
	pfvfspoof &= (1 << pf_target_shift) - 1;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/*
	 * Remaining pools belong to the PF so they do not need to have
	 * anti-spoofing enabled.
	 */
	for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
4062
4063 /**
4064 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4065 * @hw: pointer to hardware structure
4066 * @enable: enable or disable switch for VLAN anti-spoofing
4067 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4068 *
4069 **/
4070 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4071 {
4072 int vf_target_reg = vf >> 3;
4073 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4074 u32 pfvfspoof;
4075
4076 if (hw->mac.type == ixgbe_mac_82598EB)
4077 return;
4078
4079 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4080 if (enable)
4081 pfvfspoof |= (1 << vf_target_shift);
4082 else
4083 pfvfspoof &= ~(1 << vf_target_shift);
4084 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4085 }
4086
4087 /**
4088 * ixgbe_get_device_caps_generic - Get additional device capabilities
4089 * @hw: pointer to hardware structure
4090 * @device_caps: the EEPROM word with the extra device capabilities
4091 *
4092 * This function will read the EEPROM location for the device capabilities,
4093 * and return the word through device_caps.
4094 **/
4095 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4096 {
4097 DEBUGFUNC("ixgbe_get_device_caps_generic");
4098
4099 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4100
4101 return IXGBE_SUCCESS;
4102 }
4103
4104 /**
4105 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4106 * @hw: pointer to hardware structure
4107 *
4108 **/
4109 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4110 {
4111 u32 regval;
4112 u32 i;
4113
4114 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4115
4116 /* Enable relaxed ordering */
4117 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4118 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4119 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4120 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4121 }
4122
4123 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4124 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4125 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4126 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4127 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4128 }
4129
4130 }
4131
/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to the bytes to checksum
 * @length: number of bytes to include in the checksum
 *
 * Sums @length bytes of @buffer and returns the two's complement of the
 * sum, so that adding the returned checksum byte back into the running
 * sum yields zero.  A NULL buffer produces a checksum of 0.
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 idx;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (buffer == NULL)
		return 0;

	for (idx = 0; idx < length; idx++)
		sum = (u8)(sum + buffer[idx]);

	return (u8)(0U - sum);
}
4154
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed; on success the FW response (header plus any payload) is
 *  written back into this same buffer
 * @length: length of buffer, must be multiple of 4 bytes
 *
 * Communicates with the manageability block. On success return IXGBE_SUCCESS
 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Buffer must be non-empty, dword aligned and within the FW limit */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll for completion: FW clears HICR_C when the command is done */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command successful completion: fail if we timed out waiting
	 * for HICR_C to clear, or if FW never raised the status-valid bit.
	 */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		buffer[bi] = IXGBE_LE32_TO_CPUS(buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	/* Caller's buffer must be able to hold header plus payload */
	if (length < (buf_len + hdr_size)) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the "<=" bound reads exactly the payload dwords
	 * only because the header is one dword (bi starts at 1), making
	 * bi <= dword_len span dwords 1..dword_len — confirm this if
	 * struct ixgbe_hic_hdr ever grows beyond 4 bytes.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		buffer[bi] = IXGBE_LE32_TO_CPUS(buffer[bi]);
	}

out:
	return ret_val;
}
4252
/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 *
 * Sends driver version number to firmware through the manageability
 * block. On success return IXGBE_SUCCESS
 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");

	/* Serialize SW/FW access to the manageability interface */
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
	    != IXGBE_SUCCESS) {
		ret_val = IXGBE_ERR_SWFW_SYNC;
		goto out;
	}

	/* Build the driver-info command block */
	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	/* Checksum is computed with the checksum field itself zeroed,
	 * over FW_CEM_HDR_LEN + buf_len bytes of the command block.
	 */
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* NOTE(review): pad/pad2 are assigned after the checksum is taken,
	 * presumably because they lie outside the checksummed region —
	 * confirm against the struct ixgbe_hic_drv_info layout.
	 */
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	/* Retry the mailbox command a bounded number of times; on an
	 * interface error retry, otherwise map the FW status and stop.
	 */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		/* LINTED */
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd));
		if (ret_val != IXGBE_SUCCESS)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
out:
	return ret_val;
}
4315
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	/* Shared index: carries from the WEIGHTED case into the EQUAL case
	 * (switch fallthrough) and again into the final clear loop.
	 */
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom.
	 * NOTE(review): assumes headroom <= rx_pb_size; pbsize (unsigned)
	 * would wrap otherwise — confirm callers guarantee this.
	 */
	pbsize -= headroom;

	/* Guard the divisions below against num_pb == 0 */
	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
		/* FALLTHRU */
	case PBA_STRATEGY_EQUAL:
		/* Split the remaining space evenly over buffers i..num_pb-1 */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		/* Unknown strategy: Rx packet buffer registers left as-is */
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size (i == num_pb here) */
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
4375
4376 /**
4377 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4378 * @hw: pointer to the hardware structure
4379 *
4380 * The 82599 and x540 MACs can experience issues if TX work is still pending
4381 * when a reset occurs. This function prevents this by flushing the PCIe
4382 * buffers on the system.
4383 **/
4384 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4385 {
4386 u32 gcr_ext, hlreg0;
4387
4388 /*
4389 * If double reset is not requested then all transactions should
4390 * already be clear and as such there is no work to do
4391 */
4392 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4393 return;
4394
4395 /*
4396 * Set loopback enable to prevent any transmits from being sent
4397 * should the link come up. This assumes that the RXCTRL.RXEN bit
4398 * has already been cleared.
4399 */
4400 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4401 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4402
4403 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4404 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4405 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4406 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4407
4408 /* Flush all writes and allow 20usec for all transactions to clear */
4409 IXGBE_WRITE_FLUSH(hw);
4410 usec_delay(20);
4411
4412 /* restore previous register values */
4413 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4414 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4415 }
4416