XXXX Intel X540 support
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -28,24 +28,22 @@
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD$*/
+/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete);
@@ -56,25 +54,14 @@
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
-s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
-s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
-s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
-s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
-void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
-void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
*
@@ -115,35 +102,10 @@
gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
/**
- * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- * @hw: pointer to hardware structure
- *
- * Read PCIe configuration space, and get the MSI-X vector count from
- * the capabilities table.
- **/
-u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
- u32 msix_count = 18;
-
- DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
-
- if (hw->mac.msix_vectors_from_pcie) {
- msix_count = IXGBE_READ_PCIE_WORD(hw,
- IXGBE_PCIE_MSIX_82598_CAPS);
- msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
- /* MSI-X count is zero-based in HW, so increment to give
- * proper value */
- msix_count++;
- }
- return msix_count;
-}
-
-/**
* ixgbe_init_ops_82598 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
* Initialize the function pointers and assign the MAC type for 82598.
* Does not touch the hardware.
@@ -175,10 +137,11 @@
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow Control */
mac->ops.fc_enable = &ixgbe_fc_enable_82598;
@@ -186,22 +149,25 @@
mac->vft_size = 128;
mac->num_rar_entries = 16;
mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
mac->ops.flap_tx_laser = NULL;
- mac->ops.get_link_capabilities =
- &ixgbe_get_link_capabilities_82598;
+ mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+ mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = NULL;
+
return ret_val;
}
/**
* ixgbe_init_phy_ops_82598 - PHY/SFP specific init
@@ -236,14 +202,10 @@
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
- case ixgbe_phy_aq:
- phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_generic;
- break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
/* Call SFP+ identify routine to get the SFP+ module type */
ret_val = phy->ops.identify_sfp(hw);
@@ -291,19 +253,19 @@
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
/* set the completion timeout for interface */
if (ret_val == IXGBE_SUCCESS)
@@ -387,11 +349,10 @@
/* Detect if there is a copper PHY attached. */
switch (hw->phy.type) {
case ixgbe_phy_cu_unknown:
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
default:
break;
}
@@ -428,26 +389,45 @@
}
/**
* ixgbe_fc_enable_82598 - Enable flow control
* @hw: pointer to hardware structure
- * @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
- u32 rx_pba_size;
+ u32 fcrtl, fcrth;
u32 link_speed = 0;
+ int i;
bool link_up;
DEBUGFUNC("ixgbe_fc_enable_82598");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ DEBUGOUT("Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
* more details see 82598 Specification update.
*/
@@ -465,13 +445,11 @@
break;
}
}
/* Negotiate the fc mode to use */
- ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val == IXGBE_ERR_FLOW_CONTROL)
- goto out;
+ ixgbe_fc_autoneg(hw);
/* Disable any previous flow control settings */
fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
@@ -528,35 +506,31 @@
fctrl_reg |= IXGBE_FCTRL_DPF;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
- rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+ fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+ }
- reg = (rx_pba_size - hw->fc.low_water) << 6;
- if (hw->fc.send_xon)
- reg |= IXGBE_FCRTL_XONE;
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
-
- reg = (rx_pba_size - hw->fc.high_water) << 6;
- reg |= IXGBE_FCRTH_FCEN;
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
- if ((packetbuf_num & 1) == 0)
- reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
- else
- reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+ reg = hw->fc.pause_time * 0x00010001;
+ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
}
@@ -727,15 +701,10 @@
if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
(ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = FALSE;
- /* if link is down, zero out the current_mode */
- if (*link_up == FALSE) {
- hw->fc.current_mode = ixgbe_fc_none;
- hw->fc.fc_was_autonegged = FALSE;
- }
out:
return IXGBE_SUCCESS;
}
/**
@@ -758,11 +727,11 @@
u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
DEBUGFUNC("ixgbe_setup_mac_link_82598");
/* Check to see if speed passed in is supported. */
- (void) ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
status = IXGBE_ERR_LINK_SETUP;
@@ -812,11 +781,11 @@
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
autoneg_wait_to_complete);
/* Set up MAC */
- (void) ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
return status;
}
/**
@@ -838,11 +807,13 @@
u8 analog_val;
DEBUGFUNC("ixgbe_reset_hw_82598");
/* Call adapter stop to disable tx/rx and clear interrupts */
- hw->mac.ops.stop_adapter(hw);
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
/*
* Power up the Atlas Tx lanes if they are currently powered down.
* Atlas Tx lanes are powered down for MAC loopback tests, but
* they are not automatically restored on reset.
@@ -881,30 +852,23 @@
/* Init PHY and function pointers, perform SFP setup */
phy_status = hw->phy.ops.init(hw);
if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto reset_hw_out;
- else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
- goto no_phy_reset;
+ if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto mac_reset_top;
hw->phy.ops.reset(hw);
}
-no_phy_reset:
- /*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
- * access and verify no pending requests before reset
- */
- (void) ixgbe_disable_pcie_master(hw);
-
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
*/
- ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
- IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
usec_delay(1);
@@ -915,25 +879,22 @@
if (ctrl & IXGBE_CTRL_RST) {
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
+ msec_delay(50);
+
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
- * for any pending HW events to complete. We use 1usec since that is
- * what is needed for ixgbe_disable_pcie_master(). The second reset
- * then clears out any effects of those events.
+ * for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
- usec_delay(1);
goto mac_reset_top;
}
- msec_delay(50);
-
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
@@ -1000,11 +961,11 @@
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
- UNREFERENCED_PARAMETER(vmdq);
+ UNREFERENCED_1PARAMETER(vmdq);
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", rar);
return IXGBE_ERR_INVALID_ARGUMENT;
@@ -1223,11 +1184,10 @@
/* Copper PHY must be checked before AUTOC LMS to determine correct
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
switch (hw->phy.type) {
case ixgbe_phy_tn:
- case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
@@ -1354,18 +1314,60 @@
/* Enable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+ UNREFERENCED_1PARAMETER(headroom);
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+
+ return;
}
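
For reference, a minimal standalone sketch of the register arithmetic that the reworked ixgbe_fc_enable_82598() above introduces: the per-traffic-class water marks are shifted left by 10 before the enable bits are OR'd in, and multiplying the 16-bit pause time by 0x00010001 replicates it into both halves of an FCTTV register (two TCs per register). The sample water marks, pause time, and the DEMO_* bit values below are assumptions for illustration only, not the driver's definitions; the shift and the multiply come straight from the diff. Note also that the weighted strategy in ixgbe_set_rxpba_82598() (four buffers at 80KB plus four at 48KB) totals 512KB, matching the mac->rx_pb_size = 512 set in ixgbe_init_ops_82598().

#include <stdint.h>
#include <stdio.h>

/* Assumed demo values; the real bits live in ixgbe_type.h. */
#define DEMO_FCRTL_XONE  0x80000000u   /* assumed XON-enable bit in FCRTL */
#define DEMO_FCRTH_FCEN  0x80000000u   /* assumed FC-enable bit in FCRTH  */

int main(void)
{
	uint32_t low_water_kb  = 48;     /* hypothetical low water mark, KB  */
	uint32_t high_water_kb = 64;     /* hypothetical high water mark, KB */
	uint16_t pause_time    = 0x680;  /* hypothetical pause quanta        */

	/* Water marks shifted by 10 (KB -> bytes), enable bit OR'd in. */
	uint32_t fcrtl = (low_water_kb << 10) | DEMO_FCRTL_XONE;
	uint32_t fcrth = (high_water_kb << 10) | DEMO_FCRTH_FCEN;

	/* 0x00010001 packs the same 16-bit pause time into both halves
	 * of the 32-bit FCTTV register. */
	uint32_t fcttv = (uint32_t)pause_time * 0x00010001u;

	/* FCRTV (refresh threshold) is half the pause time. */
	uint32_t fcrtv = (uint32_t)pause_time / 2;

	printf("FCRTL=0x%08x FCRTH=0x%08x FCTTV=0x%08x FCRTV=0x%08x\n",
	       (unsigned)fcrtl, (unsigned)fcrth,
	       (unsigned)fcttv, (unsigned)fcrtv);
	return 0;
}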