Print this page
3014 Intel X540 Support
*** 1,8 ****
/******************************************************************************
! Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
--- 1,8 ----
/******************************************************************************
! Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
*** 28,38 ****
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
! /*$FreeBSD$*/
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_api.h"
--- 28,38 ----
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
! /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_api.h"
*** 49,67 ****
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset);
! static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
! static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
! static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
! static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
! static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
! u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
- s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
-
/**
* ixgbe_init_ops_generic - Inits function ptrs
* @hw: pointer to the hardware structure
*
* Initialize the function pointers.
--- 49,65 ----
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset);
! static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
! u16 words, u16 *data);
! static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
! u16 words, u16 *data);
! static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
! u16 offset);
/**
* ixgbe_init_ops_generic - Inits function ptrs
* @hw: pointer to the hardware structure
*
* Initialize the function pointers.
*** 75,89 ****
DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM */
eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
! if (eec & (1 << 8))
eeprom->ops.read = &ixgbe_read_eerd_generic;
! else
eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
eeprom->ops.write = &ixgbe_write_eeprom_generic;
eeprom->ops.validate_checksum =
&ixgbe_validate_eeprom_checksum_generic;
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
--- 73,92 ----
DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM */
eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
! if (eec & IXGBE_EEC_PRES) {
eeprom->ops.read = &ixgbe_read_eerd_generic;
! eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
! } else {
eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+ eeprom->ops.read_buffer =
+ &ixgbe_read_eeprom_buffer_bit_bang_generic;
+ }
eeprom->ops.write = &ixgbe_write_eeprom_generic;
+ eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
eeprom->ops.validate_checksum =
&ixgbe_validate_eeprom_checksum_generic;
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
*** 119,128 ****
--- 122,132 ----
mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
mac->ops.enable_mc = &ixgbe_enable_mc_generic;
mac->ops.disable_mc = &ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
/* Flow Control */
mac->ops.fc_enable = &ixgbe_fc_enable_generic;
*** 133,142 ****
--- 137,316 ----
return IXGBE_SUCCESS;
}
/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function check the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+ {
+
+ DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ return IXGBE_SUCCESS;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return IXGBE_SUCCESS;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+ }
+
+ /**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+ {
+ s32 ret_val = IXGBE_SUCCESS;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
+
+ DEBUGFUNC("ixgbe_setup_fc");
+
+ /*
+ * Validate the requested mode. Strict IEEE mode does not allow
+ * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+ */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /*
+ * 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /*
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+ reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+ reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= IXGBE_TAF_ASM_PAUSE;
+ reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+ }
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE;
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
+
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+ }
+
+ DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+ out:
+ return ret_val;
+ }
+
+ /**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware by filling the bus info structure and media type, clears
* all on chip counters, initializes receive address registers, multicast
*** 143,152 ****
--- 317,327 ----
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
DEBUGFUNC("ixgbe_start_hw_generic");
/* Set the media type */
*** 165,180 ****
ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
! (void) ixgbe_setup_fc(hw, 0);
/* Clear adapter stopped flag */
hw->adapter_stopped = FALSE;
! return IXGBE_SUCCESS;
}
/**
* ixgbe_start_hw_gen2 - Init sequence for common device family
* @hw: pointer to hw structure
--- 340,358 ----
ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
! ret_val = ixgbe_setup_fc(hw);
! if (ret_val != IXGBE_SUCCESS)
! goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = FALSE;
! out:
! return ret_val;
}
/**
* ixgbe_start_hw_gen2 - Init sequence for common device family
* @hw: pointer to hw structure
*** 198,215 ****
IXGBE_WRITE_FLUSH(hw);
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
! regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
! regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
! IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
return IXGBE_SUCCESS;
}
--- 376,393 ----
IXGBE_WRITE_FLUSH(hw);
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
! regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
! regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
! IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
return IXGBE_SUCCESS;
}
*** 252,345 ****
{
u16 i = 0;
DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
! (void) IXGBE_READ_REG(hw, IXGBE_CRCERRS);
! (void) IXGBE_READ_REG(hw, IXGBE_ILLERRC);
! (void) IXGBE_READ_REG(hw, IXGBE_ERRBC);
! (void) IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++)
! (void) IXGBE_READ_REG(hw, IXGBE_MPC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_MLFC);
! (void) IXGBE_READ_REG(hw, IXGBE_MRFC);
! (void) IXGBE_READ_REG(hw, IXGBE_RLEC);
! (void) IXGBE_READ_REG(hw, IXGBE_LXONTXC);
! (void) IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
if (hw->mac.type >= ixgbe_mac_82599EB) {
! (void) IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
! (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
} else {
! (void) IXGBE_READ_REG(hw, IXGBE_LXONRXC);
! (void) IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
}
for (i = 0; i < 8; i++) {
! (void) IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
if (hw->mac.type >= ixgbe_mac_82599EB) {
! (void) IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
! (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
} else {
! (void) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
}
if (hw->mac.type >= ixgbe_mac_82599EB)
for (i = 0; i < 8; i++)
! (void) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
! (void) IXGBE_READ_REG(hw, IXGBE_PRC64);
! (void) IXGBE_READ_REG(hw, IXGBE_PRC127);
! (void) IXGBE_READ_REG(hw, IXGBE_PRC255);
! (void) IXGBE_READ_REG(hw, IXGBE_PRC511);
! (void) IXGBE_READ_REG(hw, IXGBE_PRC1023);
! (void) IXGBE_READ_REG(hw, IXGBE_PRC1522);
! (void) IXGBE_READ_REG(hw, IXGBE_GPRC);
! (void) IXGBE_READ_REG(hw, IXGBE_BPRC);
! (void) IXGBE_READ_REG(hw, IXGBE_MPRC);
! (void) IXGBE_READ_REG(hw, IXGBE_GPTC);
! (void) IXGBE_READ_REG(hw, IXGBE_GORCL);
! (void) IXGBE_READ_REG(hw, IXGBE_GORCH);
! (void) IXGBE_READ_REG(hw, IXGBE_GOTCL);
! (void) IXGBE_READ_REG(hw, IXGBE_GOTCH);
for (i = 0; i < 8; i++)
! (void) IXGBE_READ_REG(hw, IXGBE_RNBC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_RUC);
! (void) IXGBE_READ_REG(hw, IXGBE_RFC);
! (void) IXGBE_READ_REG(hw, IXGBE_ROC);
! (void) IXGBE_READ_REG(hw, IXGBE_RJC);
! (void) IXGBE_READ_REG(hw, IXGBE_MNGPRC);
! (void) IXGBE_READ_REG(hw, IXGBE_MNGPDC);
! (void) IXGBE_READ_REG(hw, IXGBE_MNGPTC);
! (void) IXGBE_READ_REG(hw, IXGBE_TORL);
! (void) IXGBE_READ_REG(hw, IXGBE_TORH);
! (void) IXGBE_READ_REG(hw, IXGBE_TPR);
! (void) IXGBE_READ_REG(hw, IXGBE_TPT);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC64);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC127);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC255);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC511);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC1023);
! (void) IXGBE_READ_REG(hw, IXGBE_PTC1522);
! (void) IXGBE_READ_REG(hw, IXGBE_MPTC);
! (void) IXGBE_READ_REG(hw, IXGBE_BPTC);
for (i = 0; i < 16; i++) {
! (void) IXGBE_READ_REG(hw, IXGBE_QPRC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QPTC(i));
if (hw->mac.type >= ixgbe_mac_82599EB) {
! (void) IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
} else {
! (void) IXGBE_READ_REG(hw, IXGBE_QBRC(i));
! (void) IXGBE_READ_REG(hw, IXGBE_QBTC(i));
}
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_read_pba_string_generic - Reads part number string from EEPROM
--- 430,537 ----
{
u16 i = 0;
DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
! IXGBE_READ_REG(hw, IXGBE_CRCERRS);
! IXGBE_READ_REG(hw, IXGBE_ILLERRC);
! IXGBE_READ_REG(hw, IXGBE_ERRBC);
! IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++)
! IXGBE_READ_REG(hw, IXGBE_MPC(i));
! IXGBE_READ_REG(hw, IXGBE_MLFC);
! IXGBE_READ_REG(hw, IXGBE_MRFC);
! IXGBE_READ_REG(hw, IXGBE_RLEC);
! IXGBE_READ_REG(hw, IXGBE_LXONTXC);
! IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
if (hw->mac.type >= ixgbe_mac_82599EB) {
! IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
! IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
} else {
! IXGBE_READ_REG(hw, IXGBE_LXONRXC);
! IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
}
for (i = 0; i < 8; i++) {
! IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
! IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
if (hw->mac.type >= ixgbe_mac_82599EB) {
! IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
! IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
} else {
! IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
! IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
}
}
if (hw->mac.type >= ixgbe_mac_82599EB)
for (i = 0; i < 8; i++)
! IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
! IXGBE_READ_REG(hw, IXGBE_PRC64);
! IXGBE_READ_REG(hw, IXGBE_PRC127);
! IXGBE_READ_REG(hw, IXGBE_PRC255);
! IXGBE_READ_REG(hw, IXGBE_PRC511);
! IXGBE_READ_REG(hw, IXGBE_PRC1023);
! IXGBE_READ_REG(hw, IXGBE_PRC1522);
! IXGBE_READ_REG(hw, IXGBE_GPRC);
! IXGBE_READ_REG(hw, IXGBE_BPRC);
! IXGBE_READ_REG(hw, IXGBE_MPRC);
! IXGBE_READ_REG(hw, IXGBE_GPTC);
! IXGBE_READ_REG(hw, IXGBE_GORCL);
! IXGBE_READ_REG(hw, IXGBE_GORCH);
! IXGBE_READ_REG(hw, IXGBE_GOTCL);
! IXGBE_READ_REG(hw, IXGBE_GOTCH);
! if (hw->mac.type == ixgbe_mac_82598EB)
for (i = 0; i < 8; i++)
! IXGBE_READ_REG(hw, IXGBE_RNBC(i));
! IXGBE_READ_REG(hw, IXGBE_RUC);
! IXGBE_READ_REG(hw, IXGBE_RFC);
! IXGBE_READ_REG(hw, IXGBE_ROC);
! IXGBE_READ_REG(hw, IXGBE_RJC);
! IXGBE_READ_REG(hw, IXGBE_MNGPRC);
! IXGBE_READ_REG(hw, IXGBE_MNGPDC);
! IXGBE_READ_REG(hw, IXGBE_MNGPTC);
! IXGBE_READ_REG(hw, IXGBE_TORL);
! IXGBE_READ_REG(hw, IXGBE_TORH);
! IXGBE_READ_REG(hw, IXGBE_TPR);
! IXGBE_READ_REG(hw, IXGBE_TPT);
! IXGBE_READ_REG(hw, IXGBE_PTC64);
! IXGBE_READ_REG(hw, IXGBE_PTC127);
! IXGBE_READ_REG(hw, IXGBE_PTC255);
! IXGBE_READ_REG(hw, IXGBE_PTC511);
! IXGBE_READ_REG(hw, IXGBE_PTC1023);
! IXGBE_READ_REG(hw, IXGBE_PTC1522);
! IXGBE_READ_REG(hw, IXGBE_MPTC);
! IXGBE_READ_REG(hw, IXGBE_BPTC);
for (i = 0; i < 16; i++) {
! IXGBE_READ_REG(hw, IXGBE_QPRC(i));
! IXGBE_READ_REG(hw, IXGBE_QPTC(i));
if (hw->mac.type >= ixgbe_mac_82599EB) {
! IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
! IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
! IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
! IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
! IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
} else {
! IXGBE_READ_REG(hw, IXGBE_QBRC(i));
! IXGBE_READ_REG(hw, IXGBE_QBTC(i));
}
}
+ if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
+ IXGBE_MDIO_PCS_DEV_TYPE, &i);
+ }
+
return IXGBE_SUCCESS;
}
/**
* ixgbe_read_pba_string_generic - Reads part number string from EEPROM
*** 451,520 ****
return IXGBE_SUCCESS;
}
/**
- * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
- * @hw: pointer to hardware structure
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number length from the EEPROM.
- * Returns expected buffer size in pba_num_size
- **/
- s32 ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, u32 *pba_num_size)
- {
- s32 ret_val;
- u16 data;
- u16 pba_ptr;
- u16 length;
-
- DEBUGFUNC("ixgbe_read_pba_length_generic");
-
- if (pba_num_size == NULL) {
- DEBUGOUT("PBA buffer size was null\n");
- return IXGBE_ERR_INVALID_ARGUMENT;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- /* if data is not ptr guard the PBA must be in legacy format */
- if (data != IXGBE_PBANUM_PTR_GUARD) {
- *pba_num_size = 11;
- return IXGBE_SUCCESS;
- }
-
- ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
- if (ret_val) {
- DEBUGOUT("NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- DEBUGOUT("NVM PBA number section invalid length\n");
- return IXGBE_ERR_PBA_SECTION;
- }
-
- /*
- * Convert from length in u16 values to u8 chars, add 1 for NULL,
- * and subtract 2 because length field is included in length.
- */
- *pba_num_size = ((u32)length * 2) - 1;
-
- return IXGBE_SUCCESS;
- }
-
- /**
* ixgbe_read_pba_num_generic - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
*
* Reads the part number from the EEPROM.
--- 643,652 ----
*** 616,625 ****
--- 748,760 ----
hw->bus.speed = ixgbe_bus_speed_2500;
break;
case IXGBE_PCI_LINK_SPEED_5000:
hw->bus.speed = ixgbe_bus_speed_5000;
break;
+ case IXGBE_PCI_LINK_SPEED_8000:
+ hw->bus.speed = ixgbe_bus_speed_8000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
}
*** 661,671 ****
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
- u32 number_of_queues;
u32 reg_val;
u16 i;
DEBUGFUNC("ixgbe_stop_adapter_generic");
--- 796,805 ----
*** 674,712 ****
* the hardware
*/
hw->adapter_stopped = TRUE;
/* Disable the receive unit */
! reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
! reg_val &= ~(IXGBE_RXCTRL_RXEN);
! IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
! IXGBE_WRITE_FLUSH(hw);
! msec_delay(2);
! /* Clear interrupt mask to stop from interrupts being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
! /* Clear any pending interrupts */
! (void) IXGBE_READ_REG(hw, IXGBE_EICR);
/* Disable the transmit unit. Each queue must be disabled. */
! number_of_queues = hw->mac.max_tx_queues;
! for (i = 0; i < number_of_queues; i++) {
! reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
! if (reg_val & IXGBE_TXDCTL_ENABLE) {
! reg_val &= ~IXGBE_TXDCTL_ENABLE;
! IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
}
- }
/*
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests
*/
! (void) ixgbe_disable_pcie_master(hw);
!
! return IXGBE_SUCCESS;
}
/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
--- 808,846 ----
* the hardware
*/
hw->adapter_stopped = TRUE;
/* Disable the receive unit */
! IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
! /* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
! /* Clear any pending interrupts, flush previous writes */
! IXGBE_READ_REG(hw, IXGBE_EICR);
/* Disable the transmit unit. Each queue must be disabled. */
! for (i = 0; i < hw->mac.max_tx_queues; i++)
! IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
!
! /* Disable the receive unit by stopping each queue */
! for (i = 0; i < hw->mac.max_rx_queues; i++) {
! reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
! reg_val &= ~IXGBE_RXDCTL_ENABLE;
! reg_val |= IXGBE_RXDCTL_SWFLSH;
! IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
}
+ /* flush all queues disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
/*
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests
*/
! return ixgbe_disable_pcie_master(hw);
}
/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
*** 765,774 ****
--- 899,910 ----
if (eeprom->type == ixgbe_eeprom_uninitialized) {
eeprom->type = ixgbe_eeprom_none;
/* Set default semaphore delay to 10ms which is a well
* tested value */
eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
/*
* Check for EEPROM present first.
* If not present leave as none
*/
*** 781,791 ****
* change if a future EEPROM is not SPI.
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
! IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
eeprom->address_bits = 16;
else
--- 917,927 ----
* change if a future EEPROM is not SPI.
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
! IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
eeprom->address_bits = 16;
else
*** 797,828 ****
return IXGBE_SUCCESS;
}
/**
! * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
* @hw: pointer to hardware structure
! * @offset: offset within the EEPROM to be written to
! * @data: 16 bit word to be written to the EEPROM
*
! * If ixgbe_eeprom_update_checksum is not called after this function, the
! * EEPROM will most likely contain an invalid checksum.
**/
! s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
! s32 status;
! u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
! DEBUGFUNC("ixgbe_write_eeprom_generic");
hw->eeprom.ops.init_params(hw);
! if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
if (status == IXGBE_SUCCESS) {
if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
--- 933,1018 ----
return IXGBE_SUCCESS;
}
/**
! * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
* @hw: pointer to hardware structure
! * @offset: offset within the EEPROM to write
! * @words: number of word(s)
! * @data: 16 bit word(s) to write to EEPROM
*
! * Reads 16 bit word(s) from EEPROM through bit-bang method
**/
! s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
! u16 words, u16 *data)
{
! s32 status = IXGBE_SUCCESS;
! u16 i, count;
! DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
hw->eeprom.ops.init_params(hw);
! if (words == 0) {
! status = IXGBE_ERR_INVALID_ARGUMENT;
! goto out;
! }
!
! if (offset + words > hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
+ /*
+ * The EEPROM page size cannot be queried from the chip. We do lazy
+ * initialization. It is worth to do that when we write large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold synchronization semaphores for too long
+ * to avoid other entity starvation. However it is more efficient
+ * to read in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+ out:
+ return status;
+ }
+
+ /**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+ {
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
if (status == IXGBE_SUCCESS) {
if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
*** 830,897 ****
status = IXGBE_ERR_EEPROM;
}
}
if (status == IXGBE_SUCCESS) {
ixgbe_standby_eeprom(hw);
/* Send the WRITE ENABLE command (8 bit opcode ) */
! ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
IXGBE_EEPROM_OPCODE_BITS);
ixgbe_standby_eeprom(hw);
/*
! * Some SPI eeproms use the 8th address bit embedded in the
! * opcode
*/
! if ((hw->eeprom.address_bits == 8) && (offset >= 128))
write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
ixgbe_shift_out_eeprom_bits(hw, write_opcode,
IXGBE_EEPROM_OPCODE_BITS);
! ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
hw->eeprom.address_bits);
! /* Send the data */
! data = (data >> 8) | (data << 8);
! ixgbe_shift_out_eeprom_bits(hw, data, 16);
! ixgbe_standby_eeprom(hw);
/* Done with writing - release the EEPROM */
ixgbe_release_eeprom(hw);
}
- out:
return status;
}
/**
! * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
* @hw: pointer to hardware structure
! * @offset: offset within the EEPROM to be read
! * @data: read 16 bit value from EEPROM
*
! * Reads 16 bit value from EEPROM through bit-bang method
**/
! s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
! u16 *data)
{
s32 status;
- u16 word_in;
- u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
! DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
/* Prepare the EEPROM for reading */
status = ixgbe_acquire_eeprom(hw);
if (status == IXGBE_SUCCESS) {
if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
--- 1020,1176 ----
status = IXGBE_ERR_EEPROM;
}
}
if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
ixgbe_standby_eeprom(hw);
/* Send the WRITE ENABLE command (8 bit opcode ) */
! ixgbe_shift_out_eeprom_bits(hw,
! IXGBE_EEPROM_WREN_OPCODE_SPI,
IXGBE_EEPROM_OPCODE_BITS);
ixgbe_standby_eeprom(hw);
/*
! * Some SPI eeproms use the 8th address bit embedded
! * in the opcode
*/
! if ((hw->eeprom.address_bits == 8) &&
! ((offset + i) >= 128))
write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
ixgbe_shift_out_eeprom_bits(hw, write_opcode,
IXGBE_EEPROM_OPCODE_BITS);
! ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
hw->eeprom.address_bits);
! page_size = hw->eeprom.word_page_size;
+ /* Send the data in burst via SPI*/
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ msec_delay(10);
+ }
/* Done with writing - release the EEPROM */
ixgbe_release_eeprom(hw);
}
return status;
}
/**
! * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
* @hw: pointer to hardware structure
! * @offset: offset within the EEPROM to be written to
! * @data: 16 bit word to be written to the EEPROM
*
! * If ixgbe_eeprom_update_checksum is not called after this function, the
! * EEPROM will most likely contain an invalid checksum.
**/
! s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
s32 status;
! DEBUGFUNC("ixgbe_write_eeprom_generic");
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+ out:
+ return status;
+ }
+
+ /**
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit words(s) from EEPROM
+ * @words: number of word(s)
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+ {
+ s32 status = IXGBE_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ /*
+ * We cannot hold synchronization semaphores for too long
+ * to avoid other entity starvation. However it is more efficient
+ * to read in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != IXGBE_SUCCESS)
+ break;
+ }
+
+ out:
+ return status;
+ }
+
+ /**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+ {
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
/* Prepare the EEPROM for reading */
status = ixgbe_acquire_eeprom(hw);
if (status == IXGBE_SUCCESS) {
if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
*** 899,997 ****
status = IXGBE_ERR_EEPROM;
}
}
if (status == IXGBE_SUCCESS) {
ixgbe_standby_eeprom(hw);
-
/*
! * Some SPI eeproms use the 8th address bit embedded in the
! * opcode
*/
! if ((hw->eeprom.address_bits == 8) && (offset >= 128))
read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
ixgbe_shift_out_eeprom_bits(hw, read_opcode,
IXGBE_EEPROM_OPCODE_BITS);
! ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
hw->eeprom.address_bits);
/* Read the data. */
word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
! *data = (word_in >> 8) | (word_in << 8);
/* End this read operation */
ixgbe_release_eeprom(hw);
}
out:
return status;
}
/**
! * ixgbe_read_eerd_generic - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
! * @data: word read from the EEPROM
*
! * Reads a 16 bit word from the EEPROM using the EERD register.
**/
! s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
u32 eerd;
! s32 status;
! DEBUGFUNC("ixgbe_read_eerd_generic");
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
! eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
! if (status == IXGBE_SUCCESS)
! *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
IXGBE_EEPROM_RW_REG_DATA);
! else
DEBUGOUT("Eeprom read timed out\n");
out:
return status;
}
/**
! * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
! * @data: word write to the EEPROM
*
! * Write a 16 bit word to the EEPROM using the EEWR register.
**/
! s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
u32 eewr;
! s32 status;
DEBUGFUNC("ixgbe_write_eewr_generic");
hw->eeprom.ops.init_params(hw);
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
! eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
! (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Eeprom write EEWR timed out\n");
goto out;
--- 1178,1383 ----
status = IXGBE_ERR_EEPROM;
}
}
if (status == IXGBE_SUCCESS) {
+ for (i = 0; i < words; i++) {
ixgbe_standby_eeprom(hw);
/*
! * Some SPI eeproms use the 8th address bit embedded
! * in the opcode
*/
! if ((hw->eeprom.address_bits == 8) &&
! ((offset + i) >= 128))
read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
ixgbe_shift_out_eeprom_bits(hw, read_opcode,
IXGBE_EEPROM_OPCODE_BITS);
! ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
hw->eeprom.address_bits);
/* Read the data. */
word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
! data[i] = (word_in >> 8) | (word_in << 8);
! }
/* End this read operation */
ixgbe_release_eeprom(hw);
}
+ return status;
+ }
+
+ /**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+ {
+ s32 status;
+
+ DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
out:
return status;
}
/**
! * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
! * @words: number of word(s)
! * @data: 16 bit word(s) from the EEPROM
*
! * Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
! s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
! u16 words, u16 *data)
{
u32 eerd;
! s32 status = IXGBE_SUCCESS;
! u32 i;
! DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
hw->eeprom.ops.init_params(hw);
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
! for (i = 0; i < words; i++) {
! eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
IXGBE_EEPROM_RW_REG_START;
IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
! if (status == IXGBE_SUCCESS) {
! data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
IXGBE_EEPROM_RW_REG_DATA);
! } else {
DEBUGOUT("Eeprom read timed out\n");
+ goto out;
+ }
+ }
+ out:
+ return status;
+ }
+ /**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discover EEPROM page size by writing marching data at given offset.
+ * This function is called only when we are writing a new large buffer
+ * at given offset so the data would be overwritten anyway.
+ **/
+ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+ {
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = IXGBE_SUCCESS;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /*
+ * When writing in burst more than the actual page size
+ * EEPROM address wraps around current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ DEBUGOUT1("Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
out:
return status;
}
/**
! * ixgbe_read_eerd_generic - Read EEPROM word using EERD
* @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+ {
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+ }
+
+ /**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
! * @words: number of word(s)
! * @data: word(s) write to the EEPROM
*
! * Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
! s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
! u16 words, u16 *data)
{
u32 eewr;
! s32 status = IXGBE_SUCCESS;
! u16 i;
DEBUGFUNC("ixgbe_write_eewr_generic");
hw->eeprom.ops.init_params(hw);
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
! for (i = 0; i < words; i++) {
! eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
! (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
! IXGBE_EEPROM_RW_REG_START;
status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Eeprom write EEWR timed out\n");
goto out;
*** 1002,1017 ****
--- 1388,1417 ----
status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Eeprom write EEWR timed out\n");
goto out;
}
+ }
out:
return status;
}
/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+ {
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+ }
+
+ /**
* ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
* @ee_reg: EEPROM flag for polling
*
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
*** 1053,1063 ****
u32 eec;
u32 i;
DEBUGFUNC("ixgbe_acquire_eeprom");
! if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
--- 1453,1464 ----
u32 eec;
u32 i;
DEBUGFUNC("ixgbe_acquire_eeprom");
! if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
! != IXGBE_SUCCESS)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
*** 1076,1086 ****
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
DEBUGOUT("Could not acquire EEPROM grant\n");
! ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
/* Setup EEPROM for Read/Write */
if (status == IXGBE_SUCCESS) {
--- 1477,1487 ----
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
DEBUGOUT("Could not acquire EEPROM grant\n");
! hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
/* Setup EEPROM for Read/Write */
if (status == IXGBE_SUCCESS) {
*** 1122,1131 ****
--- 1523,1554 ----
break;
}
usec_delay(50);
}
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ usec_delay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = IXGBE_SUCCESS;
+ }
+
/* Now get the semaphore between SW/FW through the SWESMBI bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
*** 1402,1412 ****
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
! ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
/* Delay before attempt to obtain semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
}
--- 1825,1835 ----
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
! hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
/* Delay before attempt to obtain semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
}
*** 1711,1721 ****
DEBUGOUT(" Clearing MTA\n");
for (i = 0; i < hw->mac.mcft_size; i++)
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
! (void) ixgbe_init_uta_tables(hw);
return IXGBE_SUCCESS;
}
/**
--- 2134,2144 ----
DEBUGOUT(" Clearing MTA\n");
for (i = 0; i < hw->mac.mcft_size; i++)
IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
! ixgbe_init_uta_tables(hw);
return IXGBE_SUCCESS;
}
/**
*** 1901,1918 ****
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
* @mc_addr_list: the list of new multicast addresses
* @mc_addr_count: number of addresses
* @next: iterator function to walk the multicast address list
*
! * The given list replaces any existing list. Clears the MC addrs from receive
! * address registers and the multicast table. Uses unused receive address
! * registers for the first multicast addresses, and hashes the rest into the
! * multicast table.
**/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
! u32 mc_addr_count, ixgbe_mc_addr_itr next)
{
u32 i;
u32 vmdq;
DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
--- 2324,2341 ----
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
* @mc_addr_list: the list of new multicast addresses
* @mc_addr_count: number of addresses
* @next: iterator function to walk the multicast address list
+ * @clear: flag, when set clears the table beforehand
*
! * When the clear flag is set, the given list replaces any existing list.
! * Hashes the given addresses into the multicast table.
**/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
! u32 mc_addr_count, ixgbe_mc_addr_itr next,
! bool clear)
{
u32 i;
u32 vmdq;
DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
*** 1923,1934 ****
*/
hw->addr_ctrl.num_mc_addrs = mc_addr_count;
hw->addr_ctrl.mta_in_use = 0;
/* Clear mta_shadow */
DEBUGOUT(" Clearing MTA\n");
! (void) memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
/* Update mta_shadow */
for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
--- 2346,2359 ----
*/
hw->addr_ctrl.num_mc_addrs = mc_addr_count;
hw->addr_ctrl.mta_in_use = 0;
/* Clear mta_shadow */
+ if (clear) {
DEBUGOUT(" Clearing MTA\n");
! memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
! }
/* Update mta_shadow */
for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
*** 1985,2016 ****
}
/**
* ixgbe_fc_enable_generic - Enable flow control
* @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
! s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = IXGBE_SUCCESS;
u32 mflcn_reg, fccfg_reg;
u32 reg;
- u32 rx_pba_size;
u32 fcrtl, fcrth;
DEBUGFUNC("ixgbe_fc_enable_generic");
! /* Negotiate the fc mode to use */
! ret_val = ixgbe_fc_autoneg(hw);
! if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
! mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
/*
--- 2410,2457 ----
}
/**
* ixgbe_fc_enable_generic - Enable flow control
* @hw: pointer to hardware structure
*
* Enable flow control according to the current settings.
**/
! s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 fcrtl, fcrth;
+ int i;
DEBUGFUNC("ixgbe_fc_enable_generic");
! /* Validate the water mark configuration */
! if (!hw->fc.pause_time) {
! ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
+ }
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
+ /* Negotiate the fc mode to use */
+ ixgbe_fc_autoneg(hw);
+
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
! mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
/*
*** 2062,2190 ****
/* Set 802.3x based flow control settings. */
mflcn_reg |= IXGBE_MFLCN_DPF;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
! fcrth = (rx_pba_size - hw->fc.high_water) << 10;
! fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
! if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
! fcrth |= IXGBE_FCRTH_FCEN;
! if (hw->fc.send_xon)
! fcrtl |= IXGBE_FCRTL_XONE;
}
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
-
/* Configure pause time (2 TCs per register) */
! reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
! if ((packetbuf_num & 1) == 0)
! reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
! else
! reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
! IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
! IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
out:
return ret_val;
}
/**
! * ixgbe_fc_autoneg - Configure flow control
* @hw: pointer to hardware structure
*
! * Compares our advertised flow control capabilities to those advertised by
! * our link partner, and determines the proper flow control mode to use.
**/
! s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
! s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
! ixgbe_link_speed speed;
! bool link_up;
! DEBUGFUNC("ixgbe_fc_autoneg");
!
! if (hw->fc.disable_fc_autoneg)
! goto out;
!
/*
! * AN should have completed when the cable was plugged in.
! * Look for reasons to bail out. Bail out if:
! * - FC autoneg is disabled, or if
! * - link is not up.
! *
! * Since we're being called from an LSC, link is already known to be up.
! * So use link_up_wait_to_complete=FALSE.
*/
! hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
! if (!link_up) {
! ret_val = IXGBE_ERR_FLOW_CONTROL;
! goto out;
}
!
! switch (hw->phy.media_type) {
! /* Autoneg flow control on fiber adapters */
! case ixgbe_media_type_fiber:
! if (speed == IXGBE_LINK_SPEED_1GB_FULL)
! ret_val = ixgbe_fc_autoneg_fiber(hw);
! break;
!
! /* Autoneg flow control on backplane adapters */
! case ixgbe_media_type_backplane:
! ret_val = ixgbe_fc_autoneg_backplane(hw);
! break;
!
! /* Autoneg flow control on copper adapters */
! case ixgbe_media_type_copper:
! if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
! ret_val = ixgbe_fc_autoneg_copper(hw);
! break;
!
! default:
! break;
! }
!
! out:
! if (ret_val == IXGBE_SUCCESS) {
! hw->fc.fc_was_autonegged = TRUE;
} else {
! hw->fc.fc_was_autonegged = FALSE;
! hw->fc.current_mode = hw->fc.requested_mode;
}
! return ret_val;
}
/**
* ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
* @hw: pointer to hardware structure
- * @speed:
- * @link_up
*
* Enable flow control according on 1 gig fiber.
**/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
! s32 ret_val;
/*
* On multispeed fiber at 1g, bail out if
* - link is up but AN did not complete, or if
* - link is up and AN completed but timed out
*/
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
! if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
! ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
! ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
- }
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
--- 2503,2616 ----
/* Set 802.3x based flow control settings. */
mflcn_reg |= IXGBE_MFLCN_DPF;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
! /* Set up and enable Rx high/low water mark thresholds, enable XON. */
! for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
! if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
! hw->fc.high_water[i]) {
! fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
! IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
! fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
! } else {
! IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
! /*
! * In order to prevent Tx hangs when the internal Tx
! * switch is enabled we must set the high water mark
! * to the maximum FCRTH value. This allows the Tx
! * switch to function even under heavy Rx workloads.
! */
! fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
! }
! IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
}
/* Configure pause time (2 TCs per register) */
! reg = hw->fc.pause_time * 0x00010001;
! for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
! IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
! /* Configure flow control refresh threshold value */
! IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
}
/**
! * ixgbe_negotiate_fc - Negotiate flow control
* @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
*
! * Find the intersection between advertised settings and link partner's
! * advertised settings
**/
! static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
! u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
! if ((!(adv_reg)) || (!(lp_reg)))
! return IXGBE_ERR_FC_NOT_NEGOTIATED;
! if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
! * Now we need to check if the user selected Rx ONLY
! * of pause frames. In this case, we had to advertise
! * FULL flow control because we could not advertise RX
! * ONLY. Hence, we must now check to see if we need to
! * turn OFF the TRANSMISSION of PAUSE frames.
*/
! if (hw->fc.requested_mode == ixgbe_fc_full) {
! hw->fc.current_mode = ixgbe_fc_full;
! DEBUGOUT("Flow Control = FULL.\n");
! } else {
! hw->fc.current_mode = ixgbe_fc_rx_pause;
! DEBUGOUT("Flow Control=RX PAUSE frames only\n");
}
! } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
! (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
! hw->fc.current_mode = ixgbe_fc_tx_pause;
! DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
! } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
! !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
! hw->fc.current_mode = ixgbe_fc_rx_pause;
! DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
} else {
! hw->fc.current_mode = ixgbe_fc_none;
! DEBUGOUT("Flow Control = NONE.\n");
}
! return IXGBE_SUCCESS;
}
/**
* ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
* @hw: pointer to hardware structure
*
* Enable flow control according on 1 gig fiber.
**/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
! s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
/*
* On multispeed fiber at 1g, bail out if
* - link is up but AN did not complete, or if
* - link is up and AN completed but timed out
*/
linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
! if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
! (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
goto out;
pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
*** 2204,2237 ****
* Enable flow control according to IEEE clause 37.
**/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
! s32 ret_val;
/*
* On backplane, bail out if
* - backplane autoneg was not completed, or if
* - we are 82599 and link partner is not AN enabled
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
! if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
! hw->fc.fc_was_autonegged = FALSE;
! hw->fc.current_mode = hw->fc.requested_mode;
! ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
- }
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
! if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
! hw->fc.fc_was_autonegged = FALSE;
! hw->fc.current_mode = hw->fc.requested_mode;
! ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
- }
/*
* Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
*/
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
--- 2630,2655 ----
* Enable flow control according to IEEE clause 37.
**/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
! s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
/*
* On backplane, bail out if
* - backplane autoneg was not completed, or if
* - we are 82599 and link partner is not AN enabled
*/
links = IXGBE_READ_REG(hw, IXGBE_LINKS);
! if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
goto out;
if (hw->mac.type == ixgbe_mac_82599EB) {
links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
! if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
goto out;
}
/*
* Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
*/
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
*** 2268,2493 ****
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
/**
! * ixgbe_negotiate_fc - Negotiate flow control
* @hw: pointer to hardware structure
- * @adv_reg: flow control advertised settings
- * @lp_reg: link partner's flow control settings
- * @adv_sym: symmetric pause bit in advertisement
- * @adv_asm: asymmetric pause bit in advertisement
- * @lp_sym: symmetric pause bit in link partner advertisement
- * @lp_asm: asymmetric pause bit in link partner advertisement
*
! * Find the intersection between advertised settings and link partner's
! * advertised settings
**/
! static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
! u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
! if ((!(adv_reg)) || (!(lp_reg)))
! return IXGBE_ERR_FC_NOT_NEGOTIATED;
! if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
! /*
! * Now we need to check if the user selected Rx ONLY
! * of pause frames. In this case, we had to advertise
! * FULL flow control because we could not advertise RX
! * ONLY. Hence, we must now check to see if we need to
! * turn OFF the TRANSMISSION of PAUSE frames.
! */
! if (hw->fc.requested_mode == ixgbe_fc_full) {
! hw->fc.current_mode = ixgbe_fc_full;
! DEBUGOUT("Flow Control = FULL.\n");
! } else {
! hw->fc.current_mode = ixgbe_fc_rx_pause;
! DEBUGOUT("Flow Control=RX PAUSE frames only\n");
! }
! } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
! (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
! hw->fc.current_mode = ixgbe_fc_tx_pause;
! DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
! } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
! !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
! hw->fc.current_mode = ixgbe_fc_rx_pause;
! DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
! } else {
! hw->fc.current_mode = ixgbe_fc_none;
! DEBUGOUT("Flow Control = NONE.\n");
! }
! return IXGBE_SUCCESS;
! }
- /**
- * ixgbe_setup_fc - Set up flow control
- * @hw: pointer to hardware structure
- *
- * Called at init time to set up flow control.
- **/
- s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
- {
- s32 ret_val = IXGBE_SUCCESS;
- u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
-
- DEBUGFUNC("ixgbe_setup_fc");
-
- /* Validate the packetbuf configuration */
- if (packetbuf_num < 0 || packetbuf_num > 7) {
- DEBUGOUT1("Invalid packet buffer number [%d], expected range is"
- " 0-7\n", packetbuf_num);
- ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
- goto out;
- }
-
/*
! * Validate the water mark configuration. Zero water marks are invalid
! * because it causes the controller to just blast out fc packets.
*/
! if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
! DEBUGOUT("Invalid water mark configuration\n");
! ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
- }
! /*
! * Validate the requested mode. Strict IEEE mode does not allow
! * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
! */
! if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
! DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
! ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
- }
- /*
- * 10gig parts do not have a word in the EEPROM to determine the
- * default flow control setting, so we explicitly set it to full.
- */
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /*
- * Set up the 1G and 10G flow control advertisement registers so the
- * HW will be able to do fc autoneg once the cable is plugged in. If
- * we link at 10G, the 1G advertisement is harmless and vice versa.
- */
-
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
! reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
! reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
break;
case ixgbe_media_type_copper:
! hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
! IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
break;
default:
- ;
- }
-
- /*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
- reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
- reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
- } else if (hw->phy.media_type == ixgbe_media_type_copper) {
- reg_cu |= (IXGBE_TAF_ASM_PAUSE);
- reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
}
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
- if (hw->phy.media_type == ixgbe_media_type_backplane)
- reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
- IXGBE_AUTOC_ASM_PAUSE);
- else if (hw->phy.media_type == ixgbe_media_type_copper)
- reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- }
- /*
- * Enable auto-negotiation between the MAC & PHY;
- * the MAC will advertise clause 37 flow control.
- */
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
- DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-
- /*
- * AUTOC restart handles negotiation of 1G and 10G on backplane
- * and copper. There is no need to set the PCS1GCTL register.
- *
- */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
- } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
- }
-
- DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
! return ret_val;
}
/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
--- 2686,2751 ----
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
/**
! * ixgbe_fc_autoneg - Configure flow control
* @hw: pointer to hardware structure
*
! * Compares our advertised flow control capabilities to those advertised by
! * our link partner, and determines the proper flow control mode to use.
**/
! void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
! s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
! ixgbe_link_speed speed;
! bool link_up;
! DEBUGFUNC("ixgbe_fc_autoneg");
/*
! * AN should have completed when the cable was plugged in.
! * Look for reasons to bail out. Bail out if:
! * - FC autoneg is disabled, or if
! * - link is not up.
*/
! if (hw->fc.disable_fc_autoneg)
goto out;
! hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
! if (!link_up)
goto out;
switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
+
+ /* Autoneg flow control on backplane adapters */
case ixgbe_media_type_backplane:
! ret_val = ixgbe_fc_autoneg_backplane(hw);
break;
+ /* Autoneg flow control on copper adapters */
case ixgbe_media_type_copper:
! if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
! ret_val = ixgbe_fc_autoneg_copper(hw);
break;
default:
break;
}
out:
! if (ret_val == IXGBE_SUCCESS) {
! hw->fc.fc_was_autonegged = TRUE;
! } else {
! hw->fc.fc_was_autonegged = FALSE;
! hw->fc.current_mode = hw->fc.requested_mode;
! }
}
/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*** 2497,2578 ****
* bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
* is returned signifying master requests disabled.
**/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- u32 i;
- u32 reg_val;
- u32 number_of_queues;
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_disable_pcie_master");
! /* Just jump out if bus mastering is already disabled */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
! /* Disable the receive unit by stopping each queue */
! number_of_queues = hw->mac.max_rx_queues;
! for (i = 0; i < number_of_queues; i++) {
! reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
! if (reg_val & IXGBE_RXDCTL_ENABLE) {
! reg_val &= ~IXGBE_RXDCTL_ENABLE;
! IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
! }
! }
!
! reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
! reg_val |= IXGBE_CTRL_GIO_DIS;
! IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
!
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
- goto check_device_status;
usec_delay(100);
}
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
! status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
- check_device_status:
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
! break;
! usec_delay(100);
}
- if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
! else
! goto out;
- /*
- * Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
- * being issued by our device. We then must wait 1usec for any
- * remaining completions from the PCIe bus to trickle in, and then reset
- * again to clear out any effects they may have had on our device.
- */
- hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
-
out:
return status;
}
-
/**
* ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
! * Acquires the SWFW semaphore thought the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
u32 gssr;
--- 2755,2818 ----
* bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
* is returned signifying master requests disabled.
**/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
+ u32 i;
DEBUGFUNC("ixgbe_disable_pcie_master");
! /* Always set this bit to ensure any future transactions are blocked */
! IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
!
! /* Exit if master requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
! /* Poll for master request bit to clear */
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
usec_delay(100);
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
}
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec or more for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
! hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ usec_delay(100);
if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
! goto out;
}
DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
! status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
out:
return status;
}
/**
* ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
! * Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
u32 gssr;
*** 2618,2647 ****
/**
* ixgbe_release_swfw_sync - Release SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
! * Releases the SWFW semaphore thought the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
u32 gssr;
u32 swmask = mask;
DEBUGFUNC("ixgbe_release_swfw_sync");
! (void) ixgbe_get_eeprom_semaphore(hw);
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
gssr &= ~swmask;
IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
ixgbe_release_eeprom_semaphore(hw);
}
/**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
*
* Enables the Rx DMA unit
--- 2858,2944 ----
/**
* ixgbe_release_swfw_sync - Release SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
! * Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
u32 gssr;
u32 swmask = mask;
DEBUGFUNC("ixgbe_release_swfw_sync");
! ixgbe_get_eeprom_semaphore(hw);
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
gssr &= ~swmask;
IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
ixgbe_release_eeprom_semaphore(hw);
}
/**
+ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
+ {
+ #define IXGBE_MAX_SECRX_POLL 40
+
+ int i;
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
+
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+ if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+ break;
+ else
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECRX_POLL)
+ DEBUGOUT("Rx unit being enabled before security "
+ "path fully disabled. Continuing with init.\n");
+
+ return IXGBE_SUCCESS;
+ }
+
+ /**
+ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
+ {
+ int secrxreg;
+
+ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
+
+ secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+ }
+
+ /**
* ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
*
* Enables the Rx DMA unit
*** 2677,2686 ****
--- 2974,2984 ----
if (!link_up) {
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
msec_delay(10);
}
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_BLINK(index);
*** 2758,2768 ****
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available. If they're not, no point in calling set_lan_id() here.
*/
! (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
/*
* No addresses available in this EEPROM. It's not an
* error though, so just wipe the local address and return.
--- 3056,3066 ----
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available. If they're not, no point in calling set_lan_id() here.
*/
! ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
/*
* No addresses available in this EEPROM. It's not an
* error though, so just wipe the local address and return.
*** 2803,2813 ****
u8 i;
DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
/* Look for SAN mac address pointer. If not defined, return */
! (void) ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
status = IXGBE_ERR_NO_SAN_ADDR_PTR;
goto san_mac_addr_out;
}
--- 3101,3111 ----
u8 i;
DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
/* Look for SAN mac address pointer. If not defined, return */
! ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
status = IXGBE_ERR_NO_SAN_ADDR_PTR;
goto san_mac_addr_out;
}
*** 2834,2858 ****
* @hw: pointer to hardware structure
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
! u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
! u32 msix_count = 64;
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
! if (hw->mac.msix_vectors_from_pcie) {
! msix_count = IXGBE_READ_PCIE_WORD(hw,
! IXGBE_PCIE_MSIX_82599_CAPS);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
! /* MSI-X count is zero-based in HW, so increment to give
! * proper value */
msix_count++;
- }
return msix_count;
}
/**
* ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
--- 3132,3171 ----
* @hw: pointer to hardware structure
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
! u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
! u16 msix_count = 1;
! u16 max_msix_count;
! u16 pcie_offset;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
+ default:
+ return msix_count;
+ }
+
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
! msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
! /* MSI-X count is zero-based in HW */
msix_count++;
+ if (msix_count > max_msix_count)
+ msix_count = max_msix_count;
+
return msix_count;
}
/**
* ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
*** 2898,2915 ****
}
}
if (rar < hw->mac.rar_highwater) {
/* already there so just add to the pool bits */
! (void) ixgbe_set_vmdq(hw, rar, vmdq);
} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
/* stick it into first empty RAR slot we found */
rar = first_empty_rar;
! (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
} else if (rar == hw->mac.rar_highwater) {
/* add it to the top of the list and inc the highwater mark */
! (void) ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
hw->mac.rar_highwater++;
} else if (rar >= hw->mac.num_rar_entries) {
return IXGBE_ERR_INVALID_MAC_ADDR;
}
--- 3211,3228 ----
}
}
if (rar < hw->mac.rar_highwater) {
/* already there so just add to the pool bits */
! ixgbe_set_vmdq(hw, rar, vmdq);
} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
/* stick it into first empty RAR slot we found */
rar = first_empty_rar;
! ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
} else if (rar == hw->mac.rar_highwater) {
/* add it to the top of the list and inc the highwater mark */
! ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
hw->mac.rar_highwater++;
} else if (rar >= hw->mac.num_rar_entries) {
return IXGBE_ERR_INVALID_MAC_ADDR;
}
*** 2916,2926 ****
/*
* If we found rar[0], make sure the default pool bit (we use pool 0)
* remains cleared to be sure default pool packets will get delivered
*/
if (rar == 0)
! (void) ixgbe_clear_vmdq(hw, rar, 0);
return rar;
}
/**
--- 3229,3239 ----
/*
* If we found rar[0], make sure the default pool bit (we use pool 0)
* remains cleared to be sure default pool packets will get delivered
*/
if (rar == 0)
! ixgbe_clear_vmdq(hw, rar, 0);
return rar;
}
/**
*** 3002,3011 ****
--- 3315,3351 ----
}
return IXGBE_SUCCESS;
}
/**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertised and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+ {
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ DEBUGFUNC("ixgbe_set_vmdq_san_mac");
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return IXGBE_SUCCESS;
+ }
+
+ /**
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
*** 3080,3092 ****
bool vlan_on)
{
s32 regindex;
u32 bitindex;
u32 vfta;
- u32 bits;
- u32 vt;
u32 targetbit;
bool vfta_changed = FALSE;
DEBUGFUNC("ixgbe_set_vfta_generic");
if (vlan > 4095)
--- 3420,3431 ----
bool vlan_on)
{
s32 regindex;
u32 bitindex;
u32 vfta;
u32 targetbit;
+ s32 ret_val = IXGBE_SUCCESS;
bool vfta_changed = FALSE;
DEBUGFUNC("ixgbe_set_vfta_generic");
if (vlan > 4095)
*** 3120,3181 ****
vfta_changed = TRUE;
}
}
/* Part 2
! * If VT Mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
s32 vlvf_index;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
return vlvf_index;
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index*2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB(vlvf_index*2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index*2)+1));
! bits |= (1 << (vind-32));
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB((vlvf_index*2)+1),
bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index*2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB(vlvf_index*2),
bits);
bits |= IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index*2)+1));
} else {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index*2)+1));
! bits &= ~(1 << (vind-32));
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB((vlvf_index*2)+1),
bits);
bits |= IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index*2));
}
}
/*
* If there are still bits set in the VLVFB registers
--- 3459,3555 ----
vfta_changed = TRUE;
}
}
/* Part 2
! * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
! */
! ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
! &vfta_changed);
! if (ret_val != IXGBE_SUCCESS)
! return ret_val;
!
! if (vfta_changed)
! IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
!
! return IXGBE_SUCCESS;
! }
!
! /**
! * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
! * @hw: pointer to hardware structure
! * @vlan: VLAN id to write to VLAN filter
! * @vind: VMDq output index that maps queue to VLAN id in VFVFB
! * @vlan_on: boolean flag to turn on/off VLAN in VFVF
! * @vfta_changed: pointer to boolean flag which indicates whether VFTA
! * should be changed
! *
! * Turn on/off specified bit in VLVF table.
! **/
! s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
! bool vlan_on, bool *vfta_changed)
! {
! u32 vt;
!
! DEBUGFUNC("ixgbe_set_vlvf_generic");
!
! if (vlan > 4095)
! return IXGBE_ERR_PARAM;
!
! /* If VT Mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
s32 vlvf_index;
+ u32 bits;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
return vlvf_index;
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index * 2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB(vlvf_index * 2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index * 2) + 1));
! bits |= (1 << (vind - 32));
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB((vlvf_index * 2) + 1),
bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB(vlvf_index * 2),
bits);
bits |= IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index * 2) + 1));
} else {
bits = IXGBE_READ_REG(hw,
! IXGBE_VLVFB((vlvf_index * 2) + 1));
! bits &= ~(1 << (vind - 32));
IXGBE_WRITE_REG(hw,
! IXGBE_VLVFB((vlvf_index * 2) + 1),
bits);
bits |= IXGBE_READ_REG(hw,
! IXGBE_VLVFB(vlvf_index * 2));
}
}
/*
* If there are still bits set in the VLVFB registers
*** 3193,3216 ****
* zero.
*/
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
! if (!vlan_on) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
! vfta_changed = FALSE;
}
! }
! else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
-
return IXGBE_SUCCESS;
}
/**
* ixgbe_clear_vfta_generic - Clear VLAN filter table
--- 3567,3586 ----
* zero.
*/
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
! if ((!vlan_on) && (vfta_changed != NULL)) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
! *vfta_changed = FALSE;
}
! } else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_clear_vfta_generic - Clear VLAN filter table
*** 3227,3238 ****
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
! IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
! IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
}
return IXGBE_SUCCESS;
}
--- 3597,3608 ----
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
! IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
! IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
}
return IXGBE_SUCCESS;
}
*** 3291,3306 ****
IXGBE_LINKS_SPEED_100_82599)
*speed = IXGBE_LINK_SPEED_100_FULL;
else
*speed = IXGBE_LINK_SPEED_UNKNOWN;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
-
return IXGBE_SUCCESS;
}
/**
* ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
--- 3661,3670 ----
*** 3397,3428 ****
out:
return status;
}
/**
- * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- * control
- * @hw: pointer to hardware structure
- *
- * There are several phys that do not support autoneg flow control. This
- * function check the device id to see if the associated phy supports
- * autoneg flow control.
- **/
- static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
- {
-
- DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_T3_LOM:
- return IXGBE_SUCCESS;
- default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
- }
- }
-
- /**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for anti-spoofing
* @pf: Physical Function pool - do not enable anti-spoofing for the PF
*
--- 3761,3770 ----
*** 3442,3465 ****
/*
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
! for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
/*
* The PF should be allowed to spoof so that it can support
! * emulation mode NICs. Reset the bit assigned to the PF
*/
! pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
! pfvfspoof ^= (1 << pf_target_shift);
! IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
}
/**
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
--- 3784,3809 ----
/*
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
! for (j = 0; j < pf_target_reg; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
/*
* The PF should be allowed to spoof so that it can support
! * emulation mode NICs. Do not set the bits assigned to the PF
*/
! pfvfspoof &= (1 << pf_target_shift) - 1;
! IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
!
! /*
! * Remaining pools belong to the PF so they do not need to have
! * anti-spoofing enabled.
! */
! for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
! IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
/**
* ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
* @hw: pointer to hardware structure
*** 3514,3530 ****
DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
/* Enable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
! regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
! regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
! IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}
--- 3858,4158 ----
DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
/* Enable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
! regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
! regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
! IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}
+
+ /**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+ {
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("ixgbe_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+ }
+
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer in bytes, must be a multiple of 4
 *
 * Writes the command block into the FLEX_MNG ram area, sets HICR.C to hand
 * it to the management controller (ARC), polls for completion, then copies
 * the reply header and any payload back into @buffer.
 *
 * Communicates with the manageability block.  On success return
 * IXGBE_SUCCESS else return IXGBE_ERR_HOST_INTERFACE_COMMAND (bad length,
 * host interface disabled, command timeout/failure, or reply larger than
 * @buffer).
 **/
static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
					u32 length)
{
	u32 hicr, i, bi;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u8 buf_len, dword_len;

	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_host_interface_command");

	/* Length must be non-zero, DWORD-aligned and within the block limit */
	if (length == 0 || length & 0x3 ||
	    length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs */
	dword_len = length >> 2;

	/*
	 * The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	/* Poll for completion: the ARC clears HICR.C when it is done */
	for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command successful completion (SV = status valid). */
	if (i == IXGBE_HI_COMMAND_TIMEOUT ||
	    (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
		DEBUGOUT("Command has failed with no status valid.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate header length in DWORDs */
	dword_len = hdr_size >> 2;

	/* First pull in the header so we know the reply buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is anything in the data position pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto out;

	/* Reject a reply payload the caller's buffer cannot hold */
	if (length < (buf_len + hdr_size)) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/*
	 * Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the <= bound works only because the header appears
	 * to occupy exactly one DWORD, so payload DWORDs sit at indices
	 * 1..dword_len — confirm against sizeof(struct ixgbe_hic_hdr) if
	 * the header layout ever changes.
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

out:
	return ret_val;
}
+
+ /**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return IXGBE_SUCCESS
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+ {
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+ != IXGBE_SUCCESS) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ out:
+ return ret_val;
+ }
+
+ /**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+ {
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number of packet
+ * buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
+ * buffer with 5/8 of the packet buffer space.
+ */
+ rxpktsize = (pbsize * 5) / (num_pb * 4);
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case PBA_STRATEGY_EQUAL:
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size*/
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+ }
+
+ /**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs. This function prevents this by flushing the PCIe
+ * buffers on the system.
+ **/
+ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+ {
+ u32 gcr_ext, hlreg0;
+
+ /*
+ * If double reset is not requested then all transactions should
+ * already be clear and as such there is no work to do
+ */
+ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+ return;
+
+ /*
+ * Set loopback enable to prevent any transmits from being sent
+ * should the link come up. This assumes that the RXCTRL.RXEN bit
+ * has already been cleared.
+ */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+ /* initiate cleaning flow for buffers in the PCIe transaction layer */
+ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+ gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+ /* Flush all writes and allow 20usec for all transactions to clear */
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(20);
+
+ /* restore previous register values */
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
+