XXXX Intel X540 support

*** 23,32 ****
--- 23,34 ----
   * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
   */
  
  /*
   * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
   */
  
  #include "ixgbe_sw.h"
  
  static char ixgbe_ident[] = "Intel 10Gb Ethernet";
*** 289,298 ****
--- 291,330 ----
      | IXGBE_FLAG_VMDQ_CAPABLE
      | IXGBE_FLAG_RSC_CAPABLE
      | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
  };
  
+ static adapter_info_t ixgbe_X540_cap = {
+     128,        /* maximum number of rx queues */
+     1,          /* minimum number of rx queues */
+     128,        /* default number of rx queues */
+     64,         /* maximum number of rx groups */
+     1,          /* minimum number of rx groups */
+     1,          /* default number of rx groups */
+     128,        /* maximum number of tx queues */
+     1,          /* minimum number of tx queues */
+     8,          /* default number of tx queues */
+     15500,      /* maximum MTU size */
+     0xFF8,      /* maximum interrupt throttle rate */
+     0,          /* minimum interrupt throttle rate */
+     200,        /* default interrupt throttle rate */
+     64,         /* maximum total msix vectors */
+     16,         /* maximum number of ring vectors */
+     2,          /* maximum number of other vectors */
+     (IXGBE_EICR_LSC
+     | IXGBE_EICR_GPI_SDP1
+     | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
+ 
+     (IXGBE_SDP1_GPIEN
+     | IXGBE_SDP2_GPIEN),    /* "other" interrupt types enable mask */
+ 
+     (IXGBE_FLAG_DCA_CAPABLE
+     | IXGBE_FLAG_RSS_CAPABLE
+     | IXGBE_FLAG_VMDQ_CAPABLE
+     | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
+ };
+ 
  /*
   * Module Initialization Functions.
   */
  int
*** 865,874 ****
--- 897,915 ----
              ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
              ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
          }
          break;
+ 
+     case ixgbe_mac_X540:
+         IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
+         ixgbe->capab = &ixgbe_X540_cap;
+         /*
+          * For now, X540 is all set in its capab structure.
+          * As other X540 variants show up, things can change here.
+          */
+         break;
+ 
      default:
          IXGBE_DEBUGLOG_1(ixgbe,
              "adapter not supported in ixgbe_identify_hardware(): %d\n",
              hw->mac.type);
          return (IXGBE_FAILURE);
*** 1198,1209 ****
      /*
       * Setup default flow control thresholds - enable/disable
       * & flow control type is controlled by ixgbe.conf
       */
!     hw->fc.high_water = DEFAULT_FCRTH;
!     hw->fc.low_water = DEFAULT_FCRTL;
      hw->fc.pause_time = DEFAULT_FCPAUSE;
      hw->fc.send_xon = B_TRUE;
  
      /*
       * Initialize link settings
--- 1239,1250 ----
      /*
       * Setup default flow control thresholds - enable/disable
       * & flow control type is controlled by ixgbe.conf
       */
!     hw->fc.high_water[0] = DEFAULT_FCRTH;
!     hw->fc.low_water[0] = DEFAULT_FCRTL;
      hw->fc.pause_time = DEFAULT_FCPAUSE;
      hw->fc.send_xon = B_TRUE;
  
      /*
       * Initialize link settings
*** 2094,2104 ****
      if (hw->mac.type < ixgbe_mac_82599EB) {
          reg_val |= 0x0020;  /* pthresh */
      }
      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
  
!     if (hw->mac.type == ixgbe_mac_82599EB) {
          reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
          reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
          IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
      }
--- 2135,2145 ----
      if (hw->mac.type < ixgbe_mac_82599EB) {
          reg_val |= 0x0020;  /* pthresh */
      }
      IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
  
!     if (hw->mac.type >= ixgbe_mac_82599EB) {
          reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
          reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
          IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
      }
*** 2331,2346 ****
          /*
           * Turn off relaxed ordering for head write back or it will
           * cause problems with the tx recycling
           */
!         reg_val = IXGBE_READ_REG(hw,
!             IXGBE_DCA_TXCTRL(tx_ring->index));
!         reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
          IXGBE_WRITE_REG(hw,
              IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
      } else {
          tx_ring->tbd_head_wb = NULL;
      }
  
      tx_ring->tbd_head = 0;
      tx_ring->tbd_tail = 0;
--- 2372,2394 ----
          /*
           * Turn off relaxed ordering for head write back or it will
           * cause problems with the tx recycling
           */
! 
!         reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
!             IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
!             IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
!         reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
!         if (hw->mac.type == ixgbe_mac_82598EB) {
          IXGBE_WRITE_REG(hw,
              IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
      } else {
+             IXGBE_WRITE_REG(hw,
+                 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
+         }
+     } else {
          tx_ring->tbd_head_wb = NULL;
      }
  
      tx_ring->tbd_head = 0;
      tx_ring->tbd_tail = 0;
*** 2384,2393 ****
--- 2432,2442 ----
          IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
          break;
  
      default:
*** 2402,2411 ****
--- 2451,2461 ----
      case ixgbe_mac_82598EB:
          IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
          break;
  
      default:
          break;
*** 2418,2434 ****
      reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
      reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
      IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
  
      /*
!      * enable DMA for 82599 parts
       */
!     if (hw->mac.type == ixgbe_mac_82599EB) {
          /* DMATXCTL.TE must be set after all Tx config is complete */
          reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
          reg_val |= IXGBE_DMATXCTL_TE;
          IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
      }
  
      /*
       * Enabling tx queues ..
       * For 82599 must be done after DMATXCTL.TE is set
--- 2468,2492 ----
      reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
      reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
      IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
  
      /*
!      * enable DMA for 82599 and X540 parts
       */
!     if (hw->mac.type >= ixgbe_mac_82599EB) {
          /* DMATXCTL.TE must be set after all Tx config is complete */
          reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
          reg_val |= IXGBE_DMATXCTL_TE;
          IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
+ 
+         /* Disable arbiter to set MTQC */
+         reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+         reg_val |= IXGBE_RTTDCS_ARBDIS;
+         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
+         IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+         reg_val &= ~IXGBE_RTTDCS_ARBDIS;
+         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
      }
  
      /*
       * Enabling tx queues ..
       * For 82599 must be done after DMATXCTL.TE is set
*** 2524,2533 ****
--- 2582,2592 ----
          vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
          IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          /*
           * Enable VMDq-only.
           */
          vmdctl = IXGBE_MRQC_VMDQEN;
          IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
*** 2617,2626 ****
--- 2676,2686 ----
          vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
          IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          /*
           * Enable RSS & Setup RSS Hash functions
           */
          mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
              IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
*** 2662,2672 ****
      rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
      rxcsum |= IXGBE_RXCSUM_PCSD;
      rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
      IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
  
!     if (hw->mac.type == ixgbe_mac_82599EB) {
          /*
           * Enable Virtualization and Replication.
           */
          vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
          IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
--- 2722,2732 ----
      rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
      rxcsum |= IXGBE_RXCSUM_PCSD;
      rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
      IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
  
!     if (hw->mac.type >= ixgbe_mac_82599EB) {
          /*
           * Enable Virtualization and Replication.
           */
          vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
          IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
*** 2837,2847 ****
      /*
       * Update the multicast addresses to the MTA registers
       */
      (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
!         ixgbe_mc_table_itr);
  }
  
  /*
   * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
   *
--- 2897,2907 ----
      /*
       * Update the multicast addresses to the MTA registers
       */
      (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
!         ixgbe_mc_table_itr, TRUE);
  }
  
  /*
   * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
   *
*** 2873,2890 ****
--- 2933,2953 ----
          }
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          /*
           * 82599 supports the following combination:
           * vmdq no. x rss no.
           * [33..64] x [1..2]
           * [2..32] x [1..4]
           * 1 x [1..16]
           * However 8 rss queue per pool (vmdq) is sufficient for
           * most cases.
+          *
+          * For now, treat X540 like the 82599.
           */
          ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
          if (ixgbe->num_rx_groups == 1) {
              ixgbe->num_rx_rings = min(8, ring_per_group);
          } else if (ixgbe->num_rx_groups <= 32) {
*** 3043,3053 ****
      ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
          0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
      ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
          PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
  
!     /* Head Write Back not recommended for 82599 */
      if (hw->mac.type >= ixgbe_mac_82599EB) {
          ixgbe->tx_head_wb_enable = B_FALSE;
      }
  
      /*
--- 3106,3116 ----
      ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
          0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
      ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
          PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
  
!     /* Head Write Back not recommended for 82599 and X540 */
      if (hw->mac.type >= ixgbe_mac_82599EB) {
          ixgbe->tx_head_wb_enable = B_FALSE;
      }
  
      /*
*** 3067,3079 ****
      if (ixgbe->rx_hcksum_enable == B_FALSE) {
          ixgbe->lro_enable = B_FALSE;
      }
  
      /*
!      * ixgbe LRO only been supported by 82599 now
       */
!     if (hw->mac.type != ixgbe_mac_82599EB) {
          ixgbe->lro_enable = B_FALSE;
      }
  
      ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
          MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
          DEFAULT_TX_COPY_THRESHOLD);
--- 3130,3142 ----
      if (ixgbe->rx_hcksum_enable == B_FALSE) {
          ixgbe->lro_enable = B_FALSE;
      }
  
      /*
!      * ixgbe LRO only been supported by 82599 and X540 now
       */
!     if (hw->mac.type < ixgbe_mac_82599EB) {
          ixgbe->lro_enable = B_FALSE;
      }
  
      ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
          MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
          DEFAULT_TX_COPY_THRESHOLD);
*** 3097,3111 ****
      ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
          ixgbe->capab->min_intr_throttle, ixgbe->capab->max_intr_throttle,
          ixgbe->capab->def_intr_throttle);
  
      /*
!      * 82599 requires the interupt throttling rate is
       * a multiple of 8. This is enforced by the register
       * definiton.
       */
!     if (hw->mac.type == ixgbe_mac_82599EB)
          ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
  }
  
  static void
  ixgbe_init_params(ixgbe_t *ixgbe)
--- 3160,3174 ----
      ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
          ixgbe->capab->min_intr_throttle, ixgbe->capab->max_intr_throttle,
          ixgbe->capab->def_intr_throttle);
  
      /*
!      * 82599 and X540 require the interupt throttling rate is
       * a multiple of 8. This is enforced by the register
       * definiton.
       */
!     if (hw->mac.type >= ixgbe_mac_82599EB)
          ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
  }
  
  static void
  ixgbe_init_params(ixgbe_t *ixgbe)
*** 3227,3237 ****
      (void) ixgbe_check_link(hw, &speed, &link_up, false);
      if (link_up) {
          ixgbe->link_check_complete = B_TRUE;
  
          /* Link is up, enable flow control settings */
!         (void) ixgbe_fc_enable(hw, 0);
  
          /*
           * The Link is up, check whether it was marked as down earlier
           */
          if (ixgbe->link_state != LINK_STATE_UP) {
--- 3290,3300 ----
      (void) ixgbe_check_link(hw, &speed, &link_up, false);
      if (link_up) {
          ixgbe->link_check_complete = B_TRUE;
  
          /* Link is up, enable flow control settings */
!         (void) ixgbe_fc_enable(hw);
  
          /*
           * The Link is up, check whether it was marked as down earlier
           */
          if (ixgbe->link_state != LINK_STATE_UP) {
*** 3746,3755 ****
--- 3809,3819 ----
      case ixgbe_mac_82598EB:
          gpie |= ixgbe->capab->other_gpie;
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          gpie |= ixgbe->capab->other_gpie;
  
          /* Enable RSC Delay 8us when LRO enabled */
          if (ixgbe->lro_enable) {
              gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
*** 3939,3948 ****
--- 4003,4013 ----
          (void) ixgbe_write_analog_reg8(&ixgbe->hw,
              IXGBE_ATLAS_PDN_AN, atlas);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
          reg |= (IXGBE_AUTOC_FLU | IXGBE_AUTOC_10G_KX4);
          IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
*** 4157,4166 ****
--- 4222,4232 ----
      case ixgbe_mac_82598EB:
          ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          ixgbe->eimc = IXGBE_82599_OTHER_INTR;
          IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
          break;
  
      default:
*** 4250,4259 ****
--- 4316,4326 ----
      case ixgbe_mac_82598EB:
          ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          ixgbe->eimc = IXGBE_82599_OTHER_INTR;
          IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
          break;
  
      default:
*** 4329,4338 ****
--- 4396,4406 ----
          ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
          ixgbe_intr_other_work(ixgbe, eicr);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
          ixgbe_intr_other_work(ixgbe, eicr);
          break;
  
      default:
*** 4729,4738 ****
--- 4797,4807 ----
          ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
          IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          if (cause == -1) {
              /* other causes */
              msix_vector |= IXGBE_IVAR_ALLOC_VAL;
              index = (intr_alloc_entry & 1) * 8;
              ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
*** 4782,4791 ****
--- 4851,4861 ----
              (intr_alloc_entry & 0x3)));
          IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          if (cause == -1) {
              /* other causes */
              index = (intr_alloc_entry & 1) * 8;
              ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
              ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
*** 4831,4840 ****
--- 4901,4911 ----
              (intr_alloc_entry & 0x3)));
          IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          if (cause == -1) {
              /* other causes */
              index = (intr_alloc_entry & 1) * 8;
              ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
              ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
*** 4873,4882 ****
--- 4944,4954 ----
      switch (hw->mac.type) {
      case ixgbe_mac_82598EB:
          return (sw_rx_index);
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          return (sw_rx_index * 2);
  
      default:
          break;
      }
*** 4888,4897 ****
--- 4960,4970 ----
          hw_rx_index = (sw_rx_index / rx_ring_per_group) * 16 +
              (sw_rx_index % rx_ring_per_group);
          return (hw_rx_index);
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          if (ixgbe->num_rx_groups > 32) {
              hw_rx_index = (sw_rx_index / rx_ring_per_group) * 2 +
                  (sw_rx_index % rx_ring_per_group);
          } else {
*** 4992,5001 ****
--- 5065,5075 ----
          for (v_idx = 0; v_idx < 25; v_idx++)
              IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
          break;
  
      case ixgbe_mac_82599EB:
+     case ixgbe_mac_X540:
          for (v_idx = 0; v_idx < 64; v_idx++)
              IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
          IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
          break;