    
XXXX Intel X540 support
    
      
          --- old/usr/src/uts/common/io/ixgbe/ixgbe_82599.c
          +++ new/usr/src/uts/common/io/ixgbe/ixgbe_82599.c
   1    1  /******************************************************************************
   2    2  
   3    3    Copyright (c) 2001-2012, Intel Corporation 
   4    4    All rights reserved.
   5    5    
   6    6    Redistribution and use in source and binary forms, with or without 
   7    7    modification, are permitted provided that the following conditions are met:
   8    8    
   9    9     1. Redistributions of source code must retain the above copyright notice, 
  10   10        this list of conditions and the following disclaimer.
  11   11    
  12   12     2. Redistributions in binary form must reproduce the above copyright 
  13   13        notice, this list of conditions and the following disclaimer in the 
  14   14        documentation and/or other materials provided with the distribution.
  15   15    
  16   16     3. Neither the name of the Intel Corporation nor the names of its 
  17   17        contributors may be used to endorse or promote products derived from 
  18   18        this software without specific prior written permission.
  19   19    
  20   20    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  21   21    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  22   22    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
  
      [ 22 lines elided ]
  
  23   23    ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
  24   24    LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
  25   25    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
  26   26    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
  27   27    INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
  28   28    CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
  29   29    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  30   30    POSSIBILITY OF SUCH DAMAGE.
  31   31  
  32   32  ******************************************************************************/
  33      -/*$FreeBSD$*/
       33 +/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
  34   34  
  35   35  #include "ixgbe_type.h"
       36 +#include "ixgbe_82599.h"
  36   37  #include "ixgbe_api.h"
  37   38  #include "ixgbe_common.h"
  38   39  #include "ixgbe_phy.h"
  39   40  
  40      -s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
  41      -s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
  42      -                                      ixgbe_link_speed *speed,
  43      -                                      bool *autoneg);
  44      -enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
  45      -void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  46      -void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  47      -void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
  48      -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
  49      -                                     ixgbe_link_speed speed, bool autoneg,
  50      -                                     bool autoneg_wait_to_complete);
  51      -s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
  52      -                                     ixgbe_link_speed speed, bool autoneg,
  53      -                                     bool autoneg_wait_to_complete);
  54      -s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
  55      -                                bool autoneg_wait_to_complete);
  56      -s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
  57      -                                     ixgbe_link_speed speed,
  58      -                                     bool autoneg,
  59      -                                     bool autoneg_wait_to_complete);
  60   41  static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
  61      -                                               ixgbe_link_speed speed,
  62      -                                               bool autoneg,
  63      -                                               bool autoneg_wait_to_complete);
  64      -s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
  65      -void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
  66      -s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
  67      -s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
  68      -s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
  69      -s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
  70      -s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
  71      -s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
  72      -u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
  73      -s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
       42 +                                         ixgbe_link_speed speed,
       43 +                                         bool autoneg,
       44 +                                         bool autoneg_wait_to_complete);
  74   45  static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
  75      -bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
       46 +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
       47 +                                   u16 offset, u16 *data);
       48 +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
       49 +                                          u16 words, u16 *data);
  76   50  
  77      -
  78   51  void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
  79   52  {
  80   53          struct ixgbe_mac_info *mac = &hw->mac;
  81   54  
  82   55          DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
  83   56  
  84   57          /* enable the laser control functions for SFP+ fiber */
  85   58          if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
  86   59                  mac->ops.disable_tx_laser =
  87      -                                       &ixgbe_disable_tx_laser_multispeed_fiber;
       60 +                                       &ixgbe_disable_tx_laser_multispeed_fiber;
  88   61                  mac->ops.enable_tx_laser =
  89      -                                        &ixgbe_enable_tx_laser_multispeed_fiber;
       62 +                                        &ixgbe_enable_tx_laser_multispeed_fiber;
  90   63                  mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
  91   64  
  92   65          } else {
  93   66                  mac->ops.disable_tx_laser = NULL;
  94   67                  mac->ops.enable_tx_laser = NULL;
  95   68                  mac->ops.flap_tx_laser = NULL;
  96   69          }
  97   70  
  98   71          if (hw->phy.multispeed_fiber) {
  99   72                  /* Set up dual speed SFP+ support */
 100   73                  mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
 101   74          } else {
 102   75                  if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
 103   76                       (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
 104   77                        hw->phy.smart_speed == ixgbe_smart_speed_on) &&
 105   78                        !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
 106   79                          mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
 107   80                  } else {
 108   81                          mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
 109   82                  }
 110   83          }
 111   84  }
 112   85  
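
      [Editor's note: the selection logic above follows the shared-code ops-table
      pattern: a generic table is populated first, then media/PHY-specific handlers
      are swapped in. A minimal, self-contained sketch of that dispatch style, using
      simplified stand-in types rather than the real ixgbe structures:]

      #include <stdio.h>

      /* Hypothetical, simplified stand-ins for the shared-code types. */
      enum media_type { MEDIA_FIBER, MEDIA_BACKPLANE };

      struct mac_ops {
              int (*setup_link)(unsigned int speed);  /* selected per media/PHY */
      };

      static int setup_link_multispeed_fiber(unsigned int speed)
      {
              printf("fiber setup, speed mask 0x%x\n", speed);
              return 0;
      }

      static int setup_link_backplane(unsigned int speed)
      {
              printf("backplane setup, speed mask 0x%x\n", speed);
              return 0;
      }

      /* Mirror of the pattern: pick the handler once at init time. */
      static void init_link_ops(struct mac_ops *ops, enum media_type media)
      {
              if (media == MEDIA_FIBER)
                      ops->setup_link = setup_link_multispeed_fiber;
              else
                      ops->setup_link = setup_link_backplane;
      }

      int main(void)
      {
              struct mac_ops ops;

              init_link_ops(&ops, MEDIA_FIBER);
              return ops.setup_link(0x80);    /* callers go through the table */
      }
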
 113   86  /**
 114   87   *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 115   88   *  @hw: pointer to hardware structure
 116   89   *
 117   90   *  Initialize any function pointers that were not able to be
 118   91   *  set during init_shared_code because the PHY/SFP type was
 119   92   *  not known.  Perform the SFP init if necessary.
 120   93   *
 121   94   **/
 122   95  s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 123   96  {
 124   97          struct ixgbe_mac_info *mac = &hw->mac;
 125   98          struct ixgbe_phy_info *phy = &hw->phy;
 126   99          s32 ret_val = IXGBE_SUCCESS;
 127  100  
 128  101          DEBUGFUNC("ixgbe_init_phy_ops_82599");
 129  102  
 130  103          /* Identify the PHY or SFP module */
 131  104          ret_val = phy->ops.identify(hw);
 132  105          if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
 133  106                  goto init_phy_ops_out;
  
      [ 34 lines elided ]
  
 134  107  
 135  108          /* Setup function pointers based on detected SFP module and speeds */
 136  109          ixgbe_init_mac_link_ops_82599(hw);
 137  110          if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
 138  111                  hw->phy.ops.reset = NULL;
 139  112  
 140  113          /* If copper media, overwrite with copper function pointers */
 141  114          if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 142  115                  mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
 143  116                  mac->ops.get_link_capabilities =
 144      -                                  &ixgbe_get_copper_link_capabilities_generic;
      117 +                                  &ixgbe_get_copper_link_capabilities_generic;
 145  118          }
 146  119  
 147  120          /* Set necessary function pointers based on phy type */
 148  121          switch (hw->phy.type) {
 149  122          case ixgbe_phy_tn:
 150  123                  phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
 151  124                  phy->ops.check_link = &ixgbe_check_phy_link_tnx;
 152  125                  phy->ops.get_firmware_version =
 153      -                             &ixgbe_get_phy_firmware_version_tnx;
      126 +                             &ixgbe_get_phy_firmware_version_tnx;
 154  127                  break;
 155      -        case ixgbe_phy_aq:
 156      -                phy->ops.get_firmware_version =
 157      -                             &ixgbe_get_phy_firmware_version_generic;
 158      -                break;
 159  128          default:
 160  129                  break;
 161  130          }
 162  131  init_phy_ops_out:
 163  132          return ret_val;
 164  133  }
 165  134  
 166  135  s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 167  136  {
 168  137          s32 ret_val = IXGBE_SUCCESS;
 169  138          u32 reg_anlp1 = 0;
 170  139          u32 i = 0;
  
      [ 2 lines elided ]
  
 171  140          u16 list_offset, data_offset, data_value;
 172  141  
 173  142          DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
 174  143  
 175  144          if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
 176  145                  ixgbe_init_mac_link_ops_82599(hw);
 177  146  
 178  147                  hw->phy.ops.reset = NULL;
 179  148  
 180  149                  ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
 181      -                                                              &data_offset);
      150 +                                                              &data_offset);
 182  151                  if (ret_val != IXGBE_SUCCESS)
 183  152                          goto setup_sfp_out;
 184  153  
 185  154                  /* PHY config will finish before releasing the semaphore */
 186      -                ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
      155 +                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
      156 +                                                        IXGBE_GSSR_MAC_CSR_SM);
 187  157                  if (ret_val != IXGBE_SUCCESS) {
 188  158                          ret_val = IXGBE_ERR_SWFW_SYNC;
 189  159                          goto setup_sfp_out;
 190  160                  }
 191  161  
 192  162                  hw->eeprom.ops.read(hw, ++data_offset, &data_value);
 193  163                  while (data_value != 0xffff) {
 194  164                          IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
 195  165                          IXGBE_WRITE_FLUSH(hw);
 196  166                          hw->eeprom.ops.read(hw, ++data_offset, &data_value);
 197  167                  }
 198  168  
 199  169                  /* Release the semaphore */
 200      -                ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
      170 +                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 201  171                  /* Delay obtaining semaphore again to allow FW access */
 202  172                  msec_delay(hw->eeprom.semaphore_delay);
 203  173  
 204  174                  /* Now restart DSP by setting Restart_AN and clearing LMS */
 205  175                  IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
 206      -                                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
 207      -                                IXGBE_AUTOC_AN_RESTART));
      176 +                                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
      177 +                                IXGBE_AUTOC_AN_RESTART));
 208  178  
 209  179                  /* Wait for AN to leave state 0 */
 210  180                  for (i = 0; i < 10; i++) {
 211  181                          msec_delay(4);
 212  182                          reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 213  183                          if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
 214  184                                  break;
 215  185                  }
 216  186                  if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
 217  187                          DEBUGOUT("sfp module setup not complete\n");
 218  188                          ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 219  189                          goto setup_sfp_out;
 220  190                  }
 221  191  
 222  192                  /* Restart DSP by setting Restart_AN and return to SFI mode */
 223  193                  IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
 224      -                                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
 225      -                                IXGBE_AUTOC_AN_RESTART));
      194 +                                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
      195 +                                IXGBE_AUTOC_AN_RESTART));
 226  196          }
 227  197  
 228  198  setup_sfp_out:
 229  199          return ret_val;
 230  200  }
 231  201  
 232  202  /**
 233  203   *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 234  204   *  @hw: pointer to hardware structure
 235  205   *
 236  206   *  Initialize the function pointers and assign the MAC type for 82599.
 237  207   *  Does not touch the hardware.
 238  208   **/
 239  209  
 240  210  s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 241  211  {
 242  212          struct ixgbe_mac_info *mac = &hw->mac;
 243  213          struct ixgbe_phy_info *phy = &hw->phy;
      214 +        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 244  215          s32 ret_val;
 245  216  
 246  217          DEBUGFUNC("ixgbe_init_ops_82599");
 247  218  
 248  219          ret_val = ixgbe_init_phy_ops_generic(hw);
 249  220          ret_val = ixgbe_init_ops_generic(hw);
 250  221  
 251  222          /* PHY */
 252  223          phy->ops.identify = &ixgbe_identify_phy_82599;
 253  224          phy->ops.init = &ixgbe_init_phy_ops_82599;
 254  225  
 255  226          /* MAC */
 256  227          mac->ops.reset_hw = &ixgbe_reset_hw_82599;
 257  228          mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
 258  229          mac->ops.get_media_type = &ixgbe_get_media_type_82599;
 259  230          mac->ops.get_supported_physical_layer =
 260      -                                    &ixgbe_get_supported_physical_layer_82599;
      231 +                                    &ixgbe_get_supported_physical_layer_82599;
      232 +        mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
      233 +        mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
 261  234          mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
 262  235          mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
 263  236          mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
 264      -        mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
      237 +        mac->ops.start_hw = &ixgbe_start_hw_82599;
 265  238          mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
 266  239          mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
 267  240          mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
 268  241          mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
 269  242          mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
 270  243  
 271  244          /* RAR, Multicast, VLAN */
 272  245          mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
      246 +        mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
 273  247          mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
 274  248          mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
 275  249          mac->rar_highwater = 1;
 276  250          mac->ops.set_vfta = &ixgbe_set_vfta_generic;
      251 +        mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
 277  252          mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
 278  253          mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
 279  254          mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
 280  255          mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
 281  256          mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
 282  257  
 283  258          /* Link */
 284  259          mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
 285      -        mac->ops.check_link            = &ixgbe_check_mac_link_generic;
      260 +        mac->ops.check_link = &ixgbe_check_mac_link_generic;
      261 +        mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
 286  262          ixgbe_init_mac_link_ops_82599(hw);
 287  263  
 288      -        mac->mcft_size        = 128;
 289      -        mac->vft_size         = 128;
 290      -        mac->num_rar_entries  = 128;
 291      -        mac->rx_pb_size       = 512;
 292      -        mac->max_tx_queues    = 128;
 293      -        mac->max_rx_queues    = 128;
 294      -        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
      264 +        mac->mcft_size          = 128;
      265 +        mac->vft_size           = 128;
      266 +        mac->num_rar_entries    = 128;
      267 +        mac->rx_pb_size         = 512;
      268 +        mac->max_tx_queues      = 128;
      269 +        mac->max_rx_queues      = 128;
      270 +        mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_generic(hw);
 295  271  
      272 +        mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
      273 +                                   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
      274 +
      275 +        hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
      276 +
      277 +        /* EEPROM */
      278 +        eeprom->ops.read = &ixgbe_read_eeprom_82599;
      279 +        eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
      280 +
      281 +        /* Manageability interface */
      282 +        mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
      283 +
      284 +
 296  285          return ret_val;
 297  286  }
 298  287  
 299  288  /**
 300  289   *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 301  290   *  @hw: pointer to hardware structure
 302  291   *  @speed: pointer to link speed
 303  292   *  @negotiation: TRUE when autoneg or autotry is enabled
 304  293   *
 305  294   *  Determines the link capabilities by reading the AUTOC register.
 306  295   **/
 307  296  s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 308      -                                      ixgbe_link_speed *speed,
 309      -                                      bool *negotiation)
      297 +                                      ixgbe_link_speed *speed,
      298 +                                      bool *negotiation)
 310  299  {
 311  300          s32 status = IXGBE_SUCCESS;
 312  301          u32 autoc = 0;
 313  302  
 314  303          DEBUGFUNC("ixgbe_get_link_capabilities_82599");
 315  304  
 316  305  
 317  306          /* Check if 1G SFP module. */
 318  307          if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
 319      -            hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
      308 +            hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
      309 +            hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
      310 +            hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
 320  311                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 321  312                  *negotiation = TRUE;
 322  313                  goto out;
 323  314          }
 324  315  
 325  316          /*
 326  317           * Determine link capabilities based on the stored value of AUTOC,
 327  318           * which represents EEPROM defaults.  If AUTOC value has not
 328  319           * been stored, use the current register values.
 329  320           */
 330  321          if (hw->mac.orig_link_settings_stored)
 331  322                  autoc = hw->mac.orig_autoc;
 332  323          else
 333  324                  autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 334  325  
 335  326          switch (autoc & IXGBE_AUTOC_LMS_MASK) {
 336  327          case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
 337  328                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 338  329                  *negotiation = FALSE;
 339  330                  break;
 340  331  
 341  332          case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
 342  333                  *speed = IXGBE_LINK_SPEED_10GB_FULL;
 343  334                  *negotiation = FALSE;
 344  335                  break;
 345  336  
 346  337          case IXGBE_AUTOC_LMS_1G_AN:
 347  338                  *speed = IXGBE_LINK_SPEED_1GB_FULL;
 348  339                  *negotiation = TRUE;
 349  340                  break;
 350  341  
 351  342          case IXGBE_AUTOC_LMS_10G_SERIAL:
 352  343                  *speed = IXGBE_LINK_SPEED_10GB_FULL;
 353  344                  *negotiation = FALSE;
 354  345                  break;
 355  346  
 356  347          case IXGBE_AUTOC_LMS_KX4_KX_KR:
 357  348          case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
 358  349                  *speed = IXGBE_LINK_SPEED_UNKNOWN;
 359  350                  if (autoc & IXGBE_AUTOC_KR_SUPP)
 360  351                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 361  352                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
 362  353                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 363  354                  if (autoc & IXGBE_AUTOC_KX_SUPP)
 364  355                          *speed |= IXGBE_LINK_SPEED_1GB_FULL;
 365  356                  *negotiation = TRUE;
 366  357                  break;
 367  358  
 368  359          case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
 369  360                  *speed = IXGBE_LINK_SPEED_100_FULL;
 370  361                  if (autoc & IXGBE_AUTOC_KR_SUPP)
 371  362                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 372  363                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
 373  364                          *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 374  365                  if (autoc & IXGBE_AUTOC_KX_SUPP)
 375  366                          *speed |= IXGBE_LINK_SPEED_1GB_FULL;
 376  367                  *negotiation = TRUE;
 377  368                  break;
 378  369  
 379  370          case IXGBE_AUTOC_LMS_SGMII_1G_100M:
 380  371                  *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
  
      [ 51 lines elided ]
  
 381  372                  *negotiation = FALSE;
 382  373                  break;
 383  374  
 384  375          default:
 385  376                  status = IXGBE_ERR_LINK_SETUP;
 386  377                  goto out;
 387  378          }
 388  379  
 389  380          if (hw->phy.multispeed_fiber) {
 390  381                  *speed |= IXGBE_LINK_SPEED_10GB_FULL |
 391      -                          IXGBE_LINK_SPEED_1GB_FULL;
      382 +                          IXGBE_LINK_SPEED_1GB_FULL;
 392  383                  *negotiation = TRUE;
 393  384          }
 394  385  
 395  386  out:
 396  387          return status;
 397  388  }
 398  389  
 399  390  /**
 400  391   *  ixgbe_get_media_type_82599 - Get media type
 401  392   *  @hw: pointer to hardware structure
 402  393   *
 403  394   *  Returns the media type (fiber, copper, backplane)
 404  395   **/
  
      [ 3 lines elided ]
  
 405  396  enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 406  397  {
 407  398          enum ixgbe_media_type media_type;
 408  399  
 409  400          DEBUGFUNC("ixgbe_get_media_type_82599");
 410  401  
 411  402          /* Detect if there is a copper PHY attached. */
 412  403          switch (hw->phy.type) {
 413  404          case ixgbe_phy_cu_unknown:
 414  405          case ixgbe_phy_tn:
 415      -        case ixgbe_phy_aq:
 416  406                  media_type = ixgbe_media_type_copper;
 417  407                  goto out;
 418  408          default:
 419  409                  break;
 420  410          }
 421  411  
 422  412          switch (hw->device_id) {
 423  413          case IXGBE_DEV_ID_82599_KX4:
 424  414          case IXGBE_DEV_ID_82599_KX4_MEZZ:
 425  415          case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 426  416          case IXGBE_DEV_ID_82599_KR:
 427  417          case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
 428  418          case IXGBE_DEV_ID_82599_XAUI_LOM:
 429  419                  /* Default device ID is mezzanine card KX/KX4 */
 430  420                  media_type = ixgbe_media_type_backplane;
 431  421                  break;
 432  422          case IXGBE_DEV_ID_82599_SFP:
 433  423          case IXGBE_DEV_ID_82599_SFP_FCOE:
 434  424          case IXGBE_DEV_ID_82599_SFP_EM:
 435  425          case IXGBE_DEV_ID_82599_SFP_SF2:
 436  426          case IXGBE_DEV_ID_82599EN_SFP:
 437  427                  media_type = ixgbe_media_type_fiber;
 438  428                  break;
 439  429          case IXGBE_DEV_ID_82599_CX4:
 440  430                  media_type = ixgbe_media_type_cx4;
 441  431                  break;
 442  432          case IXGBE_DEV_ID_82599_T3_LOM:
 443  433                  media_type = ixgbe_media_type_copper;
 444  434                  break;
 445  435          default:
 446  436                  media_type = ixgbe_media_type_unknown;
 447  437                  break;
 448  438          }
 449  439  out:
 450  440          return media_type;
 451  441  }
  
      [ 26 lines elided ]
  
 452  442  
 453  443  /**
 454  444   *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 455  445   *  @hw: pointer to hardware structure
 456  446   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 457  447   *
 458  448   *  Configures link settings based on values in the ixgbe_hw struct.
 459  449   *  Restarts the link.  Performs autonegotiation if needed.
 460  450   **/
 461  451  s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 462      -                               bool autoneg_wait_to_complete)
      452 +                               bool autoneg_wait_to_complete)
 463  453  {
 464  454          u32 autoc_reg;
 465  455          u32 links_reg;
 466  456          u32 i;
 467  457          s32 status = IXGBE_SUCCESS;
 468  458  
 469  459          DEBUGFUNC("ixgbe_start_mac_link_82599");
 470  460  
 471  461  
 472  462          /* Restart link */
 473  463          autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 474  464          autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 475  465          IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 476  466  
 477  467          /* Only poll for autoneg to complete if specified to do so */
 478  468          if (autoneg_wait_to_complete) {
 479  469                  if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 480  470                       IXGBE_AUTOC_LMS_KX4_KX_KR ||
 481  471                      (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 482  472                       IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 483  473                      (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 484  474                       IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 485  475                          links_reg = 0; /* Just in case Autoneg time = 0 */
 486  476                          for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
 487  477                                  links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
 488  478                                  if (links_reg & IXGBE_LINKS_KX_AN_COMP)
 489  479                                          break;
 490  480                                  msec_delay(100);
 491  481                          }
 492  482                          if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 493  483                                  status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
 494  484                                  DEBUGOUT("Autoneg did not complete.\n");
 495  485                          }
 496  486                  }
 497  487          }
 498  488  
 499  489          /* Add delay to filter out noises during initial link setup */
 500  490          msec_delay(50);
 501  491  
 502  492          return status;
 503  493  }
 504  494  
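
      [Editor's note: ixgbe_start_mac_link_82599() above uses the common
      poll-until-bit-set idiom: re-read a status register with a fixed delay until a
      completion bit appears or the retry budget is exhausted, then report a timeout.
      A stand-alone sketch of the idiom with a hypothetical register reader and bit
      mask, not the driver's macros:]

      #include <stdbool.h>
      #include <stdio.h>
      #include <unistd.h>

      #define AN_COMPLETE_BIT 0x40000000u     /* hypothetical completion bit */
      #define POLL_ATTEMPTS   45              /* e.g. 45 x 100ms, like IXGBE_AUTO_NEG_TIME */

      /* Stand-in for a register read; pretend completion arrives on the 3rd poll. */
      static unsigned int read_status_reg(void)
      {
              static int calls;
              return (++calls >= 3) ? AN_COMPLETE_BIT : 0;
      }

      static bool wait_for_autoneg(void)
      {
              unsigned int reg = 0;
              int i;

              for (i = 0; i < POLL_ATTEMPTS; i++) {
                      reg = read_status_reg();
                      if (reg & AN_COMPLETE_BIT)
                              break;
                      usleep(100 * 1000);     /* msec_delay(100) equivalent */
              }
              return (reg & AN_COMPLETE_BIT) != 0;
      }

      int main(void)
      {
              printf(wait_for_autoneg() ? "autoneg complete\n" : "autoneg timed out\n");
              return 0;
      }
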
 505  495  /**
 506  496   *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 507  497   *  @hw: pointer to hardware structure
 508  498   *
 509  499   *  The base drivers may require better control over SFP+ module
 510  500   *  PHY states.  This includes selectively shutting down the Tx
 511  501   *  laser on the PHY, effectively halting physical link.
 512  502   **/
 513  503  void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 514  504  {
 515  505          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 516  506  
 517  507          /* Disable tx laser; allow 100us to go dark per spec */
 518  508          esdp_reg |= IXGBE_ESDP_SDP3;
 519  509          IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 520  510          IXGBE_WRITE_FLUSH(hw);
 521  511          usec_delay(100);
 522  512  }
 523  513  
 524  514  /**
 525  515   *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 526  516   *  @hw: pointer to hardware structure
 527  517   *
 528  518   *  The base drivers may require better control over SFP+ module
 529  519   *  PHY states.  This includes selectively turning on the Tx
 530  520   *  laser on the PHY, effectively starting physical link.
 531  521   **/
 532  522  void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 533  523  {
 534  524          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 535  525  
 536  526          /* Enable tx laser; allow 100ms to light up */
 537  527          esdp_reg &= ~IXGBE_ESDP_SDP3;
 538  528          IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 539  529          IXGBE_WRITE_FLUSH(hw);
 540  530          msec_delay(100);
 541  531  }
 542  532  
 543  533  /**
 544  534   *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 545  535   *  @hw: pointer to hardware structure
 546  536   *
 547  537   *  When the driver changes the link speeds that it can support,
 548  538   *  it sets autotry_restart to TRUE to indicate that we need to
 549  539   *  initiate a new autotry session with the link partner.  To do
 550  540   *  so, we set the speed then disable and re-enable the tx laser, to
 551  541   *  alert the link partner that it also needs to restart autotry on its
 552  542   *  end.  This is consistent with TRUE clause 37 autoneg, which also
 553  543   *  involves a loss of signal.
 554  544   **/
 555  545  void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 556  546  {
 557  547          DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
 558  548  
 559  549          if (hw->mac.autotry_restart) {
 560  550                  ixgbe_disable_tx_laser_multispeed_fiber(hw);
 561  551                  ixgbe_enable_tx_laser_multispeed_fiber(hw);
 562  552                  hw->mac.autotry_restart = FALSE;
 563  553          }
 564  554  }
 565  555  
  
      [ 93 lines elided ]
  
 566  556  /**
 567  557   *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 568  558   *  @hw: pointer to hardware structure
 569  559   *  @speed: new link speed
 570  560   *  @autoneg: TRUE if autonegotiation enabled
 571  561   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 572  562   *
 573  563   *  Set the link speed in the AUTOC register and restarts link.
 574  564   **/
 575  565  s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 576      -                                     ixgbe_link_speed speed, bool autoneg,
 577      -                                     bool autoneg_wait_to_complete)
      566 +                                     ixgbe_link_speed speed, bool autoneg,
      567 +                                     bool autoneg_wait_to_complete)
 578  568  {
 579  569          s32 status = IXGBE_SUCCESS;
 580  570          ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 581  571          ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 582  572          u32 speedcnt = 0;
 583  573          u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 584  574          u32 i = 0;
 585  575          bool link_up = FALSE;
 586  576          bool negotiation;
 587  577  
 588  578          DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
 589  579  
 590  580          /* Mask off requested but non-supported speeds */
 591  581          status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
 592  582          if (status != IXGBE_SUCCESS)
 593  583                  return status;
 594  584  
 595  585          speed &= link_speed;
 596  586  
 597  587          /*
 598  588           * Try each speed one by one, highest priority first.  We do this in
 599  589           * software because 10gb fiber doesn't support speed autonegotiation.
 600  590           */
 601  591          if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 602  592                  speedcnt++;
 603  593                  highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 604  594  
 605  595                  /* If we already have link at this speed, just jump out */
 606  596                  status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 607  597                  if (status != IXGBE_SUCCESS)
 608  598                          return status;
 609  599  
 610  600                  if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
 611  601                          goto out;
  
      [ 24 lines elided ]
  
 612  602  
 613  603                  /* Set the module link speed */
 614  604                  esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
 615  605                  IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 616  606                  IXGBE_WRITE_FLUSH(hw);
 617  607  
 618  608                  /* Allow module to change analog characteristics (1G->10G) */
 619  609                  msec_delay(40);
 620  610  
 621  611                  status = ixgbe_setup_mac_link_82599(hw,
 622      -                                                IXGBE_LINK_SPEED_10GB_FULL,
 623      -                                                autoneg,
 624      -                                                autoneg_wait_to_complete);
      612 +                                                    IXGBE_LINK_SPEED_10GB_FULL,
      613 +                                                    autoneg,
      614 +                                                    autoneg_wait_to_complete);
 625  615                  if (status != IXGBE_SUCCESS)
 626  616                          return status;
 627  617  
 628  618                  /* Flap the tx laser if it has not already been done */
 629  619                  ixgbe_flap_tx_laser(hw);
 630  620  
 631  621                  /*
 632  622                   * Wait for the controller to acquire link.  Per IEEE 802.3ap,
 633  623                   * Section 73.10.2, we may have to wait up to 500ms if KR is
 634  624                   * attempted.  82599 uses the same timing for 10g SFI.
 635  625                   */
 636  626                  for (i = 0; i < 5; i++) {
 637  627                          /* Wait for the link partner to also set speed */
 638  628                          msec_delay(100);
 639  629  
 640  630                          /* If we have link, just jump out */
 641  631                          status = ixgbe_check_link(hw, &link_speed,
 642      -                                                  &link_up, FALSE);
      632 +                                                  &link_up, FALSE);
 643  633                          if (status != IXGBE_SUCCESS)
 644  634                                  return status;
 645  635  
 646  636                          if (link_up)
 647  637                                  goto out;
 648  638                  }
 649  639          }
 650  640  
 651  641          if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
 652  642                  speedcnt++;
 653  643                  if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
 654  644                          highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
 655  645  
 656  646                  /* If we already have link at this speed, just jump out */
 657  647                  status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 658  648                  if (status != IXGBE_SUCCESS)
 659  649                          return status;
 660  650  
 661  651                  if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
 662  652                          goto out;
 663  653  
 664  654                  /* Set the module link speed */
 665  655                  esdp_reg &= ~IXGBE_ESDP_SDP5;
 666  656                  esdp_reg |= IXGBE_ESDP_SDP5_DIR;
 667  657                  IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
 668  658                  IXGBE_WRITE_FLUSH(hw);
 669  659  
 670  660                  /* Allow module to change analog characteristics (10G->1G) */
 671  661                  msec_delay(40);
 672  662  
 673  663                  status = ixgbe_setup_mac_link_82599(hw,
 674  664                                                      IXGBE_LINK_SPEED_1GB_FULL,
 675  665                                                      autoneg,
 676  666                                                      autoneg_wait_to_complete);
 677  667                  if (status != IXGBE_SUCCESS)
 678  668                          return status;
 679  669  
 680  670                  /* Flap the tx laser if it has not already been done */
 681  671                  ixgbe_flap_tx_laser(hw);
 682  672  
 683  673                  /* Wait for the link partner to also set speed */
 684  674                  msec_delay(100);
 685  675  
 686  676                  /* If we have link, just jump out */
 687  677                  status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 688  678                  if (status != IXGBE_SUCCESS)
 689  679                          return status;
 690  680  
 691  681                  if (link_up)
  
      [ 39 lines elided ]
  
 692  682                          goto out;
 693  683          }
 694  684  
 695  685          /*
 696  686           * We didn't get link.  Configure back to the highest speed we tried,
 697  687           * (if there was more than one).  We call ourselves back with just the
 698  688           * single highest speed that the user requested.
 699  689           */
 700  690          if (speedcnt > 1)
 701  691                  status = ixgbe_setup_mac_link_multispeed_fiber(hw,
 702      -                        highest_link_speed, autoneg, autoneg_wait_to_complete);
      692 +                        highest_link_speed, autoneg, autoneg_wait_to_complete);
 703  693  
 704  694  out:
 705  695          /* Set autoneg_advertised value based on input link speed */
 706  696          hw->phy.autoneg_advertised = 0;
 707  697  
 708  698          if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 709  699                  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 710  700  
 711  701          if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 712  702                  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 713  703  
 714  704          return status;
 715  705  }
 716  706  
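
      [Editor's note: the multispeed-fiber routine above is a highest-speed-first
      fallback: each supported rate is tried in descending order, link is polled
      briefly, and if nothing comes up the hardware is reconfigured at the highest
      rate that was attempted. A compact sketch of that control flow, using toy
      helpers in place of setup_mac_link()/check_link():]

      #include <stdbool.h>
      #include <stdio.h>

      #define SPEED_10G 0x80u
      #define SPEED_1G  0x20u

      /* Hypothetical helper: configure one rate and poll for link. */
      static bool try_speed(unsigned int speed)
      {
              printf("trying speed 0x%x\n", speed);
              return false;           /* pretend link never comes up */
      }

      static int setup_multispeed(unsigned int requested)
      {
              const unsigned int order[] = { SPEED_10G, SPEED_1G };   /* highest first */
              unsigned int highest = 0;
              int tried = 0;
              size_t i;

              for (i = 0; i < sizeof (order) / sizeof (order[0]); i++) {
                      if (!(requested & order[i]))
                              continue;
                      if (highest == 0)
                              highest = order[i];
                      tried++;
                      if (try_speed(order[i]))
                              return 0;       /* got link, done */
              }

              /* No link: leave the MAC configured at the highest speed tried. */
              if (tried > 1)
                      return setup_multispeed(highest);
              return -1;
      }

      int main(void)
      {
              return setup_multispeed(SPEED_10G | SPEED_1G) ? 1 : 0;
      }
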
  
      [ 4 lines elided ]
  
 717  707  /**
 718  708   *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 719  709   *  @hw: pointer to hardware structure
 720  710   *  @speed: new link speed
 721  711   *  @autoneg: TRUE if autonegotiation enabled
 722  712   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 723  713   *
 724  714   *  Implements the Intel SmartSpeed algorithm.
 725  715   **/
 726  716  s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 727      -                                     ixgbe_link_speed speed, bool autoneg,
 728      -                                     bool autoneg_wait_to_complete)
      717 +                                    ixgbe_link_speed speed, bool autoneg,
      718 +                                    bool autoneg_wait_to_complete)
 729  719  {
 730  720          s32 status = IXGBE_SUCCESS;
 731  721          ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
 732  722          s32 i, j;
 733  723          bool link_up = FALSE;
 734  724          u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 735  725  
 736  726          DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
 737  727  
 738  728           /* Set autoneg_advertised value based on input link speed */
 739  729          hw->phy.autoneg_advertised = 0;
 740  730  
 741  731          if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 742  732                  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 743  733  
 744  734          if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 745  735                  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 746  736  
 747  737          if (speed & IXGBE_LINK_SPEED_100_FULL)
 748  738                  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
 749  739  
 750  740          /*
 751  741           * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
 752  742           * autoneg advertisement if link is unable to be established at the
 753  743           * highest negotiated rate.  This can sometimes happen due to integrity
 754  744           * issues with the physical media connection.
 755  745           */
 756  746  
 757  747          /* First, try to get link with full advertisement */
 758  748          hw->phy.smart_speed_active = FALSE;
 759  749          for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
 760  750                  status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
 761  751                                                      autoneg_wait_to_complete);
 762  752                  if (status != IXGBE_SUCCESS)
 763  753                          goto out;
 764  754  
 765  755                  /*
 766  756                   * Wait for the controller to acquire link.  Per IEEE 802.3ap,
 767  757                   * Section 73.10.2, we may have to wait up to 500ms if KR is
 768  758                   * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
 769  759                   * Table 9 in the AN MAS.
 770  760                   */
 771  761                  for (i = 0; i < 5; i++) {
 772  762                          msec_delay(100);
 773  763  
 774  764                          /* If we have link, just jump out */
 775  765                          status = ixgbe_check_link(hw, &link_speed, &link_up,
 776  766                                                    FALSE);
 777  767                          if (status != IXGBE_SUCCESS)
 778  768                                  goto out;
 779  769  
 780  770                          if (link_up)
 781  771                                  goto out;
 782  772                  }
 783  773          }
 784  774  
 785  775          /*
 786  776           * We didn't get link.  If we advertised KR plus one of KX4/KX
 787  777           * (or BX4/BX), then disable KR and try again.
 788  778           */
 789  779          if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
 790  780              ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
 791  781                  goto out;
 792  782  
 793  783          /* Turn SmartSpeed on to disable KR support */
 794  784          hw->phy.smart_speed_active = TRUE;
 795  785          status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
 796  786                                              autoneg_wait_to_complete);
 797  787          if (status != IXGBE_SUCCESS)
 798  788                  goto out;
 799  789  
 800  790          /*
 801  791           * Wait for the controller to acquire link.  600ms will allow for
 802  792           * the AN link_fail_inhibit_timer as well for multiple cycles of
 803  793           * parallel detect, both 10g and 1g. This allows for the maximum
 804  794           * connect attempts as defined in the AN MAS table 73-7.
 805  795           */
 806  796          for (i = 0; i < 6; i++) {
 807  797                  msec_delay(100);
 808  798  
 809  799                  /* If we have link, just jump out */
 810  800                  status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
 811  801                  if (status != IXGBE_SUCCESS)
 812  802                          goto out;
 813  803  
 814  804                  if (link_up)
 815  805                          goto out;
 816  806          }
 817  807  
 818  808          /* We didn't get link.  Turn SmartSpeed back off. */
 819  809          hw->phy.smart_speed_active = FALSE;
 820  810          status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
 821  811                                              autoneg_wait_to_complete);
 822  812  
 823  813  out:
 824  814          if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
 825  815                  DEBUGOUT("Smartspeed has downgraded the link speed "
 826  816                  "from the maximum advertised\n");
 827  817          return status;
 828  818  }
 829  819  
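
      [Editor's note: SmartSpeed, as implemented above, amounts to a two-phase retry:
      attempt link a few times with the full advertisement, and only if that fails
      drop the highest rate (KR) from the advertisement and try again, restoring the
      full advertisement if that also fails. A hedged, self-contained sketch of the
      phase ordering with a toy link function, not Intel's algorithm verbatim:]

      #include <stdbool.h>
      #include <stdio.h>

      #define MAX_FULL_ADVERT_RETRIES 3       /* like IXGBE_SMARTSPEED_MAX_RETRIES */

      /* Toy stand-in: pretend link only comes up with the reduced advertisement. */
      static bool attempt_link(bool reduced_advertisement)
      {
              return reduced_advertisement;
      }

      static bool smartspeed_setup(void)
      {
              int j;

              /* Phase 1: full advertisement, a few attempts. */
              for (j = 0; j < MAX_FULL_ADVERT_RETRIES; j++) {
                      if (attempt_link(false))
                              return true;
              }

              /* Phase 2: drop the highest rate from the advertisement and retry. */
              if (attempt_link(true)) {
                      printf("link established at a downgraded advertisement\n");
                      return true;
              }

              /* Neither worked: go back to the full advertisement. */
              (void) attempt_link(false);
              return false;
      }

      int main(void)
      {
              return smartspeed_setup() ? 0 : 1;
      }
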
  
      [ 91 lines elided ]
  
 830  820  /**
 831  821   *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 832  822   *  @hw: pointer to hardware structure
 833  823   *  @speed: new link speed
 834  824   *  @autoneg: TRUE if autonegotiation enabled
 835  825   *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 836  826   *
 837  827   *  Set the link speed in the AUTOC register and restarts link.
 838  828   **/
 839  829  s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 840      -                                     ixgbe_link_speed speed, bool autoneg,
 841      -                                     bool autoneg_wait_to_complete)
      830 +                               ixgbe_link_speed speed, bool autoneg,
      831 +                               bool autoneg_wait_to_complete)
 842  832  {
 843  833          s32 status = IXGBE_SUCCESS;
 844  834          u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 845  835          u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 846  836          u32 start_autoc = autoc;
 847  837          u32 orig_autoc = 0;
 848  838          u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
 849  839          u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 850  840          u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
 851  841          u32 links_reg;
 852  842          u32 i;
 853  843          ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
 854  844  
 855  845          DEBUGFUNC("ixgbe_setup_mac_link_82599");
 856  846  
 857  847          /* Check to see if speed passed in is supported. */
 858  848          status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
 859  849          if (status != IXGBE_SUCCESS)
 860  850                  goto out;
 861  851  
 862  852          speed &= link_capabilities;
 863  853  
 864  854          if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
 865  855                  status = IXGBE_ERR_LINK_SETUP;
 866  856                  goto out;
 867  857          }
 868  858  
 869  859          /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
 870  860          if (hw->mac.orig_link_settings_stored)
 871  861                  orig_autoc = hw->mac.orig_autoc;
 872  862          else
 873  863                  orig_autoc = autoc;
 874  864  
 875  865          if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
 876  866              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 877  867              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 878  868                  /* Set KX4/KX/KR support according to speed requested */
  
      [ 27 lines elided ]
  
 879  869                  autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
 880  870                  if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 881  871                          if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 882  872                                  autoc |= IXGBE_AUTOC_KX4_SUPP;
 883  873                          if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 884  874                              (hw->phy.smart_speed_active == FALSE))
 885  875                                  autoc |= IXGBE_AUTOC_KR_SUPP;
 886  876                  if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 887  877                          autoc |= IXGBE_AUTOC_KX_SUPP;
 888  878          } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
 889      -                   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
 890      -                    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
      879 +                   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
      880 +                    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
 891  881                  /* Switch from 1G SFI to 10G SFI if requested */
 892  882                  if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
 893  883                      (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
 894  884                          autoc &= ~IXGBE_AUTOC_LMS_MASK;
 895  885                          autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
 896  886                  }
 897  887          } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
 898      -                   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
      888 +                   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
 899  889                  /* Switch from 10G SFI to 1G SFI if requested */
 900  890                  if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
 901  891                      (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
 902  892                          autoc &= ~IXGBE_AUTOC_LMS_MASK;
 903  893                          if (autoneg)
 904  894                                  autoc |= IXGBE_AUTOC_LMS_1G_AN;
 905  895                          else
 906  896                                  autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
 907  897                  }
 908  898          }
 909  899  
 910  900          if (autoc != start_autoc) {
 911  901                  /* Restart link */
 912  902                  autoc |= IXGBE_AUTOC_AN_RESTART;
 913  903                  IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
 914  904  
 915  905                  /* Only poll for autoneg to complete if specified to do so */
 916  906                  if (autoneg_wait_to_complete) {
 917  907                          if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
 918  908                              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 919  909                              link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 920  910                                  links_reg = 0; /*Just in case Autoneg time=0*/
 921  911                                  for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
 922  912                                          links_reg =
 923  913                                                 IXGBE_READ_REG(hw, IXGBE_LINKS);
 924  914                                          if (links_reg & IXGBE_LINKS_KX_AN_COMP)
 925  915                                                  break;
 926  916                                          msec_delay(100);
 927  917                                  }
 928  918                                  if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
 929  919                                          status =
 930  920                                                  IXGBE_ERR_AUTONEG_NOT_COMPLETE;
 931  921                                          DEBUGOUT("Autoneg did not complete.\n");
 932  922                                  }
 933  923                          }
 934  924                  }
 935  925  
 936  926                  /* Add delay to filter out noises during initial link setup */
 937  927                  msec_delay(50);
 938  928          }
 939  929  
 940  930  out:
 941  931          return status;
 942  932  }
 943  933  
  
      [ 35 lines elided ]
  
 944  934  /**
 945  935   *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 946  936   *  @hw: pointer to hardware structure
 947  937   *  @speed: new link speed
 948  938   *  @autoneg: TRUE if autonegotiation enabled
 949  939   *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
 950  940   *
 951  941   *  Restarts link on PHY and MAC based on settings passed in.
 952  942   **/
 953  943  static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 954      -                                               ixgbe_link_speed speed,
 955      -                                               bool autoneg,
 956      -                                               bool autoneg_wait_to_complete)
      944 +                                         ixgbe_link_speed speed,
      945 +                                         bool autoneg,
      946 +                                         bool autoneg_wait_to_complete)
 957  947  {
 958  948          s32 status;
 959  949  
 960  950          DEBUGFUNC("ixgbe_setup_copper_link_82599");
 961  951  
 962  952          /* Setup the PHY according to input speed */
 963  953          status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
 964      -                                              autoneg_wait_to_complete);
      954 +                                              autoneg_wait_to_complete);
 965  955          /* Set up MAC */
 966      -        (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
      956 +        ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
 967  957  
 968  958          return status;
 969  959  }
 970  960  
 971  961  /**
 972  962   *  ixgbe_reset_hw_82599 - Perform hardware reset
 973  963   *  @hw: pointer to hardware structure
 974  964   *
 975  965   *  Resets the hardware by resetting the transmit and receive units, masks
 976  966   *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 977  967   *  reset.
 978  968   **/
 979  969  s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 980  970  {
 981      -        s32 status = IXGBE_SUCCESS;
 982      -        u32 ctrl;
 983      -        u32 i;
 984      -        u32 autoc;
 985      -        u32 autoc2;
      971 +        ixgbe_link_speed link_speed;
      972 +        s32 status;
      973 +        u32 ctrl, i, autoc, autoc2;
      974 +        bool link_up = FALSE;
 986  975  
 987  976          DEBUGFUNC("ixgbe_reset_hw_82599");
 988  977  
 989  978          /* Call adapter stop to disable tx/rx and clear interrupts */
 990      -        hw->mac.ops.stop_adapter(hw);
      979 +        status = hw->mac.ops.stop_adapter(hw);
      980 +        if (status != IXGBE_SUCCESS)
      981 +                goto reset_hw_out;
 991  982  
      983 +        /* flush pending Tx transactions */
      984 +        ixgbe_clear_tx_pending(hw);
      985 +
 992  986          /* PHY ops must be identified and initialized prior to reset */
 993  987  
 994  988          /* Identify PHY and related function pointers */
 995  989          status = hw->phy.ops.init(hw);
 996  990  
 997  991          if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
 998  992                  goto reset_hw_out;
 999  993  
1000  994          /* Setup SFP module if there is one present. */
1001  995          if (hw->phy.sfp_setup_needed) {
1002  996                  status = hw->mac.ops.setup_sfp(hw);
  
    [ 1 line elided ]
  
1003  997                  hw->phy.sfp_setup_needed = FALSE;
1004  998          }
1005  999  
1006 1000          if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1007 1001                  goto reset_hw_out;
1008 1002  
1009 1003          /* Reset PHY */
1010 1004          if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1011 1005                  hw->phy.ops.reset(hw);
1012 1006  
1013      -        /*
1014      -         * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1015      -         * access and verify no pending requests before reset
1016      -         */
1017      -        (void) ixgbe_disable_pcie_master(hw);
1018      -
1019 1007  mac_reset_top:
1020 1008          /*
1021      -         * Issue global reset to the MAC.  This needs to be a SW reset.
1022      -         * If link reset is used, it might reset the MAC when mng is using it
     1009 +         * Issue global reset to the MAC.  Needs to be SW reset if link is up.
     1010 +         * If link reset is used when link is up, it might reset the PHY when
     1011 +         * mng is using it.  If link is down or the flag to force full link
     1012 +         * reset is set, then perform link reset.
1023 1013           */
1024      -        ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1025      -        IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
     1014 +        ctrl = IXGBE_CTRL_LNK_RST;
     1015 +        if (!hw->force_full_reset) {
     1016 +                hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
     1017 +                if (link_up)
     1018 +                        ctrl = IXGBE_CTRL_RST;
     1019 +        }
     1020 +
     1021 +        ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
     1022 +        IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1026 1023          IXGBE_WRITE_FLUSH(hw);
1027 1024  
1028 1025          /* Poll for reset bit to self-clear indicating reset is complete */
1029 1026          for (i = 0; i < 10; i++) {
1030 1027                  usec_delay(1);
1031 1028                  ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1032      -                if (!(ctrl & IXGBE_CTRL_RST))
     1029 +                if (!(ctrl & IXGBE_CTRL_RST_MASK))
1033 1030                          break;
1034 1031          }
1035      -        if (ctrl & IXGBE_CTRL_RST) {
     1032 +
     1033 +        if (ctrl & IXGBE_CTRL_RST_MASK) {
1036 1034                  status = IXGBE_ERR_RESET_FAILED;
1037 1035                  DEBUGOUT("Reset polling failed to complete.\n");
1038 1036          }
1039 1037  
     1038 +        msec_delay(50);
     1039 +
1040 1040          /*
1041 1041           * Double resets are required for recovery from certain error
1042 1042           * conditions.  Between resets, it is necessary to stall to allow time
1043      -         * for any pending HW events to complete.  We use 1usec since that is
1044      -         * what is needed for ixgbe_disable_pcie_master().  The second reset
1045      -         * then clears out any effects of those events.
     1043 +         * for any pending HW events to complete.
1046 1044           */
1047 1045          if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1048 1046                  hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1049      -                usec_delay(1);
1050 1047                  goto mac_reset_top;
1051 1048          }
1052 1049  
1053      -        msec_delay(50);
1054      -
1055 1050          /*
1056 1051           * Store the original AUTOC/AUTOC2 values if they have not been
1057 1052           * stored off yet.  Otherwise restore the stored original
1058 1053           * values since the reset operation sets back to defaults.
1059 1054           */
1060 1055          autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1061 1056          autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1062 1057          if (hw->mac.orig_link_settings_stored == FALSE) {
1063 1058                  hw->mac.orig_autoc = autoc;
1064 1059                  hw->mac.orig_autoc2 = autoc2;
1065 1060                  hw->mac.orig_link_settings_stored = TRUE;
1066 1061          } else {
1067 1062                  if (autoc != hw->mac.orig_autoc)
1068 1063                          IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1069 1064                                          IXGBE_AUTOC_AN_RESTART));
1070 1065  
1071 1066                  if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1072 1067                      (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1073 1068                          autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1074 1069                          autoc2 |= (hw->mac.orig_autoc2 &
1075      -                                   IXGBE_AUTOC2_UPPER_MASK);
     1070 +                                   IXGBE_AUTOC2_UPPER_MASK);
1076 1071                          IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1077 1072                  }
1078 1073          }
1079 1074  
1080 1075          /* Store the permanent mac address */
1081 1076          hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1082 1077  
1083 1078          /*
1084 1079           * Store MAC address from RAR0, clear receive address registers, and
1085 1080           * clear the multicast table.  Also reset num_rar_entries to 128,
1086 1081           * since we modify this value when programming the SAN MAC address.
  
    [ 1 line elided ]
  
1087 1082           */
1088 1083          hw->mac.num_rar_entries = 128;
1089 1084          hw->mac.ops.init_rx_addrs(hw);
1090 1085  
1091 1086          /* Store the permanent SAN mac address */
1092 1087          hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1093 1088  
1094 1089          /* Add the SAN MAC address to the RAR only if it's a valid address */
1095 1090          if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1096 1091                  hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1097      -                                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
     1092 +                                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
1098 1093  
     1094 +                /* Save the SAN MAC RAR index */
     1095 +                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
     1096 +
1099 1097                  /* Reserve the last RAR for the SAN MAC address */
1100 1098                  hw->mac.num_rar_entries--;
1101 1099          }
1102 1100  
1103 1101          /* Store the alternative WWNN/WWPN prefix */
1104 1102          hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1105      -                                       &hw->mac.wwpn_prefix);
     1103 +                                   &hw->mac.wwpn_prefix);
1106 1104  
1107 1105  reset_hw_out:
1108 1106          return status;
1109 1107  }
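The reset above defaults to a link reset (IXGBE_CTRL_LNK_RST) and only falls back to the milder software reset when link is up and nothing forces otherwise; a caller that wants the full link reset in every case can set the flag tested above first. A minimal sketch, using only the force_full_reset field referenced in this function:

        /* request the full link reset even if link is currently up */
        hw->force_full_reset = TRUE;
        (void) ixgbe_reset_hw_82599(hw);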
1110 1108  
1111 1109  /**
1112 1110   *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1113 1111   *  @hw: pointer to hardware structure
1114 1112   **/
1115 1113  s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1116 1114  {
1117 1115          int i;
1118 1116          u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1119 1117          fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1120 1118  
1121 1119          DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1122 1120  
1123 1121          /*
1124 1122           * Before starting reinitialization process,
  
    [ 9 lines elided ]
  
1125 1123           * FDIRCMD.CMD must be zero.
1126 1124           */
1127 1125          for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1128 1126                  if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1129 1127                        IXGBE_FDIRCMD_CMD_MASK))
1130 1128                          break;
1131 1129                  usec_delay(10);
1132 1130          }
1133 1131          if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1134 1132                  DEBUGOUT("Flow Director previous command isn't complete, "
1135      -                         "aborting table re-initialization. \n");
     1133 +                         "aborting table re-initialization.\n");
1136 1134                  return IXGBE_ERR_FDIR_REINIT_FAILED;
1137 1135          }
1138 1136  
1139 1137          IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1140 1138          IXGBE_WRITE_FLUSH(hw);
1141 1139          /*
1142 1140           * 82599 adapters flow director init flow cannot be restarted,
1143 1141           * Workaround 82599 silicon errata by performing the following steps
1144 1142           * before re-writing the FDIRCTRL control register with the same value.
1145 1143           * - write 1 to bit 8 of FDIRCMD register &
1146 1144           * - write 0 to bit 8 of FDIRCMD register
1147 1145           */
1148 1146          IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1149      -                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1150      -                         IXGBE_FDIRCMD_CLEARHT));
     1147 +                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
     1148 +                         IXGBE_FDIRCMD_CLEARHT));
1151 1149          IXGBE_WRITE_FLUSH(hw);
1152 1150          IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1153      -                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1154      -                         ~IXGBE_FDIRCMD_CLEARHT));
     1151 +                        (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
     1152 +                         ~IXGBE_FDIRCMD_CLEARHT));
1155 1153          IXGBE_WRITE_FLUSH(hw);
1156 1154          /*
1157 1155           * Clear FDIR Hash register to clear any leftover hashes
1158 1156           * waiting to be programmed.
1159 1157           */
1160 1158          IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1161 1159          IXGBE_WRITE_FLUSH(hw);
1162 1160  
1163 1161          IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1164 1162          IXGBE_WRITE_FLUSH(hw);
1165 1163  
1166 1164          /* Poll init-done after we write FDIRCTRL register */
1167 1165          for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1168 1166                  if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1169      -                                   IXGBE_FDIRCTRL_INIT_DONE)
     1167 +                                   IXGBE_FDIRCTRL_INIT_DONE)
1170 1168                          break;
1171 1169                  usec_delay(10);
1172 1170          }
1173 1171          if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1174 1172                  DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1175 1173                  return IXGBE_ERR_FDIR_REINIT_FAILED;
1176 1174          }
1177 1175  
1178 1176          /* Clear FDIR statistics registers (read to clear) */
1179      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1180      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1181      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1182      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1183      -        (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
     1177 +        IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
     1178 +        IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
     1179 +        IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
     1180 +        IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
     1181 +        IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1184 1182  
1185 1183          return IXGBE_SUCCESS;
1186 1184  }
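The guard loop at the top of this function gives the previous Flow Director command IXGBE_FDIRCMD_CMD_POLL polls of usec_delay(10) to drain, on the order of a hundred microseconds with the usual shared-code poll count of 10 (an assumption, the define is not in this hunk), before the reinit is abandoned with IXGBE_ERR_FDIR_REINIT_FAILED.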
1187 1185  
1188 1186  /**
1189      - *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
     1187 + *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1190 1188   *  @hw: pointer to hardware structure
1191      - *  @pballoc: which mode to allocate filters with
     1189 + *  @fdirctrl: value to write to flow director control register
1192 1190   **/
1193      -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
     1191 +static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1194 1192  {
1195      -        u32 fdirctrl = 0;
1196      -        u32 pbsize;
1197 1193          int i;
1198 1194  
1199      -        DEBUGFUNC("ixgbe_init_fdir_signature_82599");
     1195 +        DEBUGFUNC("ixgbe_fdir_enable_82599");
1200 1196  
1201      -        /*
1202      -         * Before enabling Flow Director, the Rx Packet Buffer size
1203      -         * must be reduced.  The new value is the current size minus
1204      -         * flow director memory usage size.
1205      -         */
1206      -        pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1207      -        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1208      -            (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1209      -
1210      -        /*
1211      -         * The defaults in the HW for RX PB 1-7 are not zero and so should be
1212      -         * intialized to zero for non DCB mode otherwise actual total RX PB
1213      -         * would be bigger than programmed and filter space would run into
1214      -         * the PB 0 region.
1215      -         */
1216      -        for (i = 1; i < 8; i++)
1217      -                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1218      -
1219      -        /* Send interrupt when 64 filters are left */
1220      -        fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1221      -
1222      -        /* Set the maximum length per hash bucket to 0xA filters */
1223      -        fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1224      -
1225      -        switch (pballoc) {
1226      -        case IXGBE_FDIR_PBALLOC_64K:
1227      -                /* 8k - 1 signature filters */
1228      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1229      -                break;
1230      -        case IXGBE_FDIR_PBALLOC_128K:
1231      -                /* 16k - 1 signature filters */
1232      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1233      -                break;
1234      -        case IXGBE_FDIR_PBALLOC_256K:
1235      -                /* 32k - 1 signature filters */
1236      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1237      -                break;
1238      -        default:
1239      -                /* bad value */
1240      -                return IXGBE_ERR_CONFIG;
1241      -        };
1242      -
1243      -        /* Move the flexible bytes to use the ethertype - shift 6 words */
1244      -        fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1245      -
1246      -
1247 1197          /* Prime the keys for hashing */
1248 1198          IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1249 1199          IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1250 1200  
1251 1201          /*
1252 1202           * Poll init-done after we write the register.  Estimated times:
1253 1203           *      10G: PBALLOC = 11b, timing is 60us
1254 1204           *       1G: PBALLOC = 11b, timing is 600us
1255 1205           *     100M: PBALLOC = 11b, timing is 6ms
1256 1206           *
 1257 1207           *     Multiply these timings by 4 if under full Rx load
  
    [ 1 line elided ]
  
1258 1208           *
1259 1209           * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1260 1210           * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1261 1211           * this might not finish in our poll time, but we can live with that
1262 1212           * for now.
1263 1213           */
1264 1214          IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1265 1215          IXGBE_WRITE_FLUSH(hw);
1266 1216          for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1267 1217                  if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1268      -                                   IXGBE_FDIRCTRL_INIT_DONE)
     1218 +                                   IXGBE_FDIRCTRL_INIT_DONE)
1269 1219                          break;
1270 1220                  msec_delay(1);
1271 1221          }
1272      -        if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1273      -                DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1274 1222  
1275      -        return IXGBE_SUCCESS;
     1223 +        if (i >= IXGBE_FDIR_INIT_DONE_POLL)
     1224 +                DEBUGOUT("Flow Director poll time exceeded!\n");
1276 1225  }
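To put numbers on the caveat above: the slowest documented case is 100M under full Rx load, roughly 6 ms * 4 = 24 ms, while the loop only waits IXGBE_FDIR_INIT_DONE_POLL * 1 ms (10 ms if the poll count is 10, as in the usual shared-code headers; an assumption, the define is not in this hunk). An init-done timeout after a downshift to 100M is therefore expected rather than fatal, which is why it is only reported with a DEBUGOUT.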
1277 1226  
1278 1227  /**
1279      - *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
     1228 + *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1280 1229   *  @hw: pointer to hardware structure
1281      - *  @pballoc: which mode to allocate filters with
     1230 + *  @fdirctrl: value to write to flow director control register, initially
     1231 + *           contains just the value of the Rx packet buffer allocation
1282 1232   **/
1283      -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
     1233 +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1284 1234  {
1285      -        u32 fdirctrl = 0;
1286      -        u32 pbsize;
1287      -        int i;
     1235 +        DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1288 1236  
1289      -        DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1290      -
1291 1237          /*
1292      -         * Before enabling Flow Director, the Rx Packet Buffer size
1293      -         * must be reduced.  The new value is the current size minus
1294      -         * flow director memory usage size.
     1238 +         * Continue setup of fdirctrl register bits:
     1239 +         *  Move the flexible bytes to use the ethertype - shift 6 words
     1240 +         *  Set the maximum length per hash bucket to 0xA filters
     1241 +         *  Send interrupt when 64 filters are left
1295 1242           */
1296      -        pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1297      -        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1298      -            (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
     1243 +        fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
     1244 +                    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
     1245 +                    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1299 1246  
1300      -        /*
1301      -         * The defaults in the HW for RX PB 1-7 are not zero and so should be
1302      -         * intialized to zero for non DCB mode otherwise actual total RX PB
1303      -         * would be bigger than programmed and filter space would run into
1304      -         * the PB 0 region.
1305      -         */
1306      -        for (i = 1; i < 8; i++)
1307      -                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
     1247 +        /* write hashes and fdirctrl register, poll for completion */
     1248 +        ixgbe_fdir_enable_82599(hw, fdirctrl);
1308 1249  
1309      -        /* Send interrupt when 64 filters are left */
1310      -        fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1311      -
1312      -        /* Initialize the drop queue to Rx queue 127 */
1313      -        fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1314      -
1315      -        switch (pballoc) {
1316      -        case IXGBE_FDIR_PBALLOC_64K:
1317      -                /* 2k - 1 perfect filters */
1318      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1319      -                break;
1320      -        case IXGBE_FDIR_PBALLOC_128K:
1321      -                /* 4k - 1 perfect filters */
1322      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1323      -                break;
1324      -        case IXGBE_FDIR_PBALLOC_256K:
1325      -                /* 8k - 1 perfect filters */
1326      -                fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1327      -                break;
1328      -        default:
1329      -                /* bad value */
1330      -                return IXGBE_ERR_CONFIG;
1331      -        };
1332      -
1333      -        /* Turn perfect match filtering on */
1334      -        fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1335      -        fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1336      -
1337      -        /* Move the flexible bytes to use the ethertype - shift 6 words */
1338      -        fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1339      -
1340      -        /* Prime the keys for hashing */
1341      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1342      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,IXGBE_ATR_SIGNATURE_HASH_KEY);
1343      -
1344      -        /*
1345      -         * Poll init-done after we write the register.  Estimated times:
1346      -         *      10G: PBALLOC = 11b, timing is 60us
1347      -         *       1G: PBALLOC = 11b, timing is 600us
1348      -         *     100M: PBALLOC = 11b, timing is 6ms
1349      -         *
1350      -         *     Multiple these timings by 4 if under full Rx load
1351      -         *
1352      -         * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1353      -         * 1 msec per poll time.  If we're at line rate and drop to 100M, then
1354      -         * this might not finish in our poll time, but we can live with that
1355      -         * for now.
1356      -         */
1357      -
1358      -        /* Set the maximum length per hash bucket to 0xA filters */
1359      -        fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1360      -
1361      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1362      -        IXGBE_WRITE_FLUSH(hw);
1363      -        for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1364      -                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1365      -                                   IXGBE_FDIRCTRL_INIT_DONE)
1366      -                        break;
1367      -                msec_delay(1);
1368      -        }
1369      -        if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1370      -                DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1371      -
1372 1250          return IXGBE_SUCCESS;
1373 1251  }
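With the Rx packet-buffer carve-out and the pballoc-to-register translation removed from this function, the caller now passes the FDIRCTRL PBALLOC encoding directly. A minimal usage sketch, assuming the caller selects one of the IXGBE_FDIRCTRL_PBALLOC_* values (the same encodings the removed code OR'ed in itself):

        /* 64 KB of filter space: up to 8k - 1 signature filters */
        if (ixgbe_init_fdir_signature_82599(hw, IXGBE_FDIRCTRL_PBALLOC_64K) !=
            IXGBE_SUCCESS) {
                /* handle Flow Director init failure */
        }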
1374 1252  
1375 1253  /**
1376      - *  ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1377      - *  @stream: input bitstream to compute the hash on
1378      - *  @key: 32-bit hash key
     1254 + *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
     1255 + *  @hw: pointer to hardware structure
     1256 + *  @fdirctrl: value to write to flow director control register, initially
     1257 + *           contains just the value of the Rx packet buffer allocation
1379 1258   **/
1380      -u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
1381      -                                 u32 key)
     1259 +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1382 1260  {
1383      -        /*
1384      -         * The algorithm is as follows:
1385      -         *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1386      -         *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1387      -         *    and A[n] x B[n] is bitwise AND between same length strings
1388      -         *
1389      -         *    K[n] is 16 bits, defined as:
1390      -         *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1391      -         *       for n modulo 32 < 15, K[n] =
1392      -         *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1393      -         *
1394      -         *    S[n] is 16 bits, defined as:
1395      -         *       for n >= 15, S[n] = S[n:n - 15]
1396      -         *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1397      -         *
1398      -         *    To simplify for programming, the algorithm is implemented
1399      -         *    in software this way:
1400      -         *
1401      -         *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1402      -         *
1403      -         *    for (i = 0; i < 352; i+=32)
1404      -         *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
1405      -         *
1406      -         *    lo_hash_dword[15:0]  ^= Stream[15:0];
1407      -         *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
1408      -         *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1409      -         *
1410      -         *    hi_hash_dword[31:0]  ^= Stream[351:320];
1411      -         *
1412      -         *    if(key[0])
1413      -         *        hash[15:0] ^= Stream[15:0];
1414      -         *
1415      -         *    for (i = 0; i < 16; i++) {
1416      -         *        if (key[i])
1417      -         *            hash[15:0] ^= lo_hash_dword[(i+15):i];
1418      -         *        if (key[i + 16])
1419      -         *            hash[15:0] ^= hi_hash_dword[(i+15):i];
1420      -         *    }
1421      -         *
1422      -         */
1423      -        __be32 common_hash_dword = 0;
1424      -        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1425      -        u32 hash_result = 0;
1426      -        u8 i;
     1261 +        DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1427 1262  
1428      -        /* record the flow_vm_vlan bits as they are a key part to the hash */
1429      -        flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
1430      -
1431      -        /* generate common hash dword */
1432      -        for (i = 10; i; i -= 2)
1433      -                common_hash_dword ^= atr_input->dword_stream[i] ^
1434      -                                     atr_input->dword_stream[i - 1];
1435      -
1436      -        hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
1437      -
1438      -        /* low dword is word swapped version of common */
1439      -        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1440      -
1441      -        /* apply flow ID/VM pool/VLAN ID bits to hash words */
1442      -        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1443      -
1444      -        /* Process bits 0 and 16 */
1445      -        if (key & 0x0001) hash_result ^= lo_hash_dword;
1446      -        if (key & 0x00010000) hash_result ^= hi_hash_dword;
1447      -
1448 1263          /*
1449      -         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1450      -         * delay this because bit 0 of the stream should not be processed
1451      -         * so we do not add the vlan until after bit 0 was processed
     1264 +         * Continue setup of fdirctrl register bits:
     1265 +         *  Turn perfect match filtering on
     1266 +         *  Report hash in RSS field of Rx wb descriptor
     1267 +         *  Initialize the drop queue
     1268 +         *  Move the flexible bytes to use the ethertype - shift 6 words
     1269 +         *  Set the maximum length per hash bucket to 0xA filters
     1270 +         *  Send interrupt when 64 (0x4 * 16) filters are left
1452 1271           */
1453      -        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
     1272 +        fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
     1273 +                    IXGBE_FDIRCTRL_REPORT_STATUS |
     1274 +                    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
     1275 +                    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
     1276 +                    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
     1277 +                    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1454 1278  
     1279 +        /* write hashes and fdirctrl register, poll for completion */
     1280 +        ixgbe_fdir_enable_82599(hw, fdirctrl);
1455 1281  
1456      -        /* process the remaining 30 bits in the key 2 bits at a time */
1457      -        for (i = 15; i; i-- ) {
1458      -                if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1459      -                if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1460      -        }
1461      -
1462      -        return hash_result & IXGBE_ATR_HASH_MASK;
     1282 +        return IXGBE_SUCCESS;
1463 1283  }
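Relative to the removed code above, the drop queue is no longer hard-wired to Rx queue 127; it comes from the IXGBE_FDIR_DROP_QUEUE define, which ixgbe_fdir_write_perfect_filter_82599() later compares against the caller's queue to decide whether to set IXGBE_FDIRCMD_DROP.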
1464 1284  
1465 1285  /*
1466 1286   * These defines allow us to quickly generate all of the necessary instructions
1467 1287   * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1468 1288   * for values 0 through 15
1469 1289   */
1470 1290  #define IXGBE_ATR_COMMON_HASH_KEY \
1471 1291                  (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1472 1292  #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1473      -{ \
     1293 +do { \
1474 1294          u32 n = (_n); \
1475 1295          if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1476 1296                  common_hash ^= lo_hash_dword >> n; \
1477 1297          else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1478 1298                  bucket_hash ^= lo_hash_dword >> n; \
1479 1299          else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1480 1300                  sig_hash ^= lo_hash_dword << (16 - n); \
1481 1301          if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1482 1302                  common_hash ^= hi_hash_dword >> n; \
1483 1303          else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1484 1304                  bucket_hash ^= hi_hash_dword >> n; \
1485 1305          else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1486 1306                  sig_hash ^= hi_hash_dword << (16 - n); \
1487      -}
     1307 +} while (0);
1488 1308  
1489 1309  /**
1490 1310   *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1491 1311   *  @stream: input bitstream to compute the hash on
1492 1312   *
1493 1313   *  This function is almost identical to the function above but contains
 1494 1314   *  several optimizations such as unwinding all of the loops, letting the
1495 1315   *  compiler work out all of the conditional ifs since the keys are static
1496 1316   *  defines, and computing two keys at once since the hashed dword stream
1497 1317   *  will be the same for both keys.
1498 1318   **/
1499      -static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1500      -                                            union ixgbe_atr_hash_dword common)
     1319 +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
     1320 +                                     union ixgbe_atr_hash_dword common)
1501 1321  {
1502 1322          u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1503 1323          u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1504 1324  
1505 1325          /* record the flow_vm_vlan bits as they are a key part to the hash */
1506 1326          flow_vm_vlan = IXGBE_NTOHL(input.dword);
1507 1327  
1508 1328          /* generate common hash dword */
1509 1329          hi_hash_dword = IXGBE_NTOHL(common.dword);
1510 1330  
1511 1331          /* low dword is word swapped version of common */
1512 1332          lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1513 1333  
1514 1334          /* apply flow ID/VM pool/VLAN ID bits to hash words */
1515 1335          hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1516 1336  
1517 1337          /* Process bits 0 and 16 */
1518 1338          IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1519 1339  
1520 1340          /*
1521 1341           * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1522 1342           * delay this because bit 0 of the stream should not be processed
1523 1343           * so we do not add the vlan until after bit 0 was processed
1524 1344           */
1525 1345          lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1526 1346  
1527 1347          /* Process remaining 30 bit of the key */
1528 1348          IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1529 1349          IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1530 1350          IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1531 1351          IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1532 1352          IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1533 1353          IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1534 1354          IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1535 1355          IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1536 1356          IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1537 1357          IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1538 1358          IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1539 1359          IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1540 1360          IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1541 1361          IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1542 1362          IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1543 1363  
1544 1364          /* combine common_hash result with signature and bucket hashes */
1545 1365          bucket_hash ^= common_hash;
1546 1366          bucket_hash &= IXGBE_ATR_HASH_MASK;
1547 1367  
  
    [ 37 lines elided ]
  
1548 1368          sig_hash ^= common_hash << 16;
1549 1369          sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1550 1370  
1551 1371          /* return completed signature hash */
1552 1372          return sig_hash ^ bucket_hash;
1553 1373  }
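To make the word swap concrete: with hi_hash_dword = 0x12345678 the swapped lo_hash_dword comes out as 0x56781234; the unrolled iterations above then shift each dword right by the key bit position n and XOR-fold the result into the bucket, signature and common hashes.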
1554 1374  
1555 1375  /**
1556 1376   *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1557 1377   *  @hw: pointer to hardware structure
1558      - *  @stream: input bitstream
     1378 + *  @input: unique input dword
     1379 + *  @common: compressed common input dword
1559 1380   *  @queue: queue index to direct traffic to
1560 1381   **/
1561 1382  s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1562      -                                          union ixgbe_atr_hash_dword input,
1563      -                                          union ixgbe_atr_hash_dword common,
1564      -                                          u8 queue)
     1383 +                                          union ixgbe_atr_hash_dword input,
     1384 +                                          union ixgbe_atr_hash_dword common,
     1385 +                                          u8 queue)
1565 1386  {
1566 1387          u64  fdirhashcmd;
1567 1388          u32  fdircmd;
1568 1389  
1569 1390          DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1570 1391  
1571 1392          /*
1572 1393           * Get the flow_type in order to program FDIRCMD properly
1573 1394           * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1574 1395           */
1575 1396          switch (input.formatted.flow_type) {
1576 1397          case IXGBE_ATR_FLOW_TYPE_TCPV4:
1577 1398          case IXGBE_ATR_FLOW_TYPE_UDPV4:
1578 1399          case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1579 1400          case IXGBE_ATR_FLOW_TYPE_TCPV6:
  
    [ 5 lines elided ]
  
1580 1401          case IXGBE_ATR_FLOW_TYPE_UDPV6:
1581 1402          case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1582 1403                  break;
1583 1404          default:
1584 1405                  DEBUGOUT(" Error on flow type input\n");
1585 1406                  return IXGBE_ERR_CONFIG;
1586 1407          }
1587 1408  
1588 1409          /* configure FDIRCMD register */
1589 1410          fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1590      -                  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
     1411 +                  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1591 1412          fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1592 1413          fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1593 1414  
1594 1415          /*
1595 1416           * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1596 1417           * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
1597 1418           */
1598 1419          fdirhashcmd = (u64)fdircmd << 32;
1599 1420          fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1600 1421          IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1601 1422  
1602 1423          DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1603 1424  
1604 1425          return IXGBE_SUCCESS;
1605 1426  }
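A minimal sketch of feeding a flow to this routine. Only union members visible in this file (dword, formatted.flow_type) are used; common_dword and rx_queue stand in for values the caller has already derived, the former being the XOR-fold of the rest of the lookup tuple as described by the hash code above:

        union ixgbe_atr_hash_dword input;
        union ixgbe_atr_hash_dword common;

        input.dword = 0;                        /* vm_pool 0, no VLAN */
        input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;

        common.dword = common_dword;            /* hypothetical, precomputed */

        (void) ixgbe_fdir_add_signature_filter_82599(hw, input, common,
            rx_queue);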
1606 1427  
     1428 +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
     1429 +do { \
     1430 +        u32 n = (_n); \
     1431 +        if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
     1432 +                bucket_hash ^= lo_hash_dword >> n; \
     1433 +        if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
     1434 +                bucket_hash ^= hi_hash_dword >> n; \
     1435 +} while (0);
     1436 +
1607 1437  /**
     1438 + *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
     1439 + *  @atr_input: input bitstream to compute the hash on
     1440 + *  @input_mask: mask for the input bitstream
     1441 + *
      1442 + *  This function serves two main purposes.  First it applies the input_mask
     1443 + *  to the atr_input resulting in a cleaned up atr_input data stream.
     1444 + *  Secondly it computes the hash and stores it in the bkt_hash field at
     1445 + *  the end of the input byte stream.  This way it will be available for
     1446 + *  future use without needing to recompute the hash.
     1447 + **/
     1448 +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
     1449 +                                          union ixgbe_atr_input *input_mask)
     1450 +{
     1451 +
     1452 +        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
     1453 +        u32 bucket_hash = 0;
     1454 +
     1455 +        /* Apply masks to input data */
     1456 +        input->dword_stream[0]  &= input_mask->dword_stream[0];
     1457 +        input->dword_stream[1]  &= input_mask->dword_stream[1];
     1458 +        input->dword_stream[2]  &= input_mask->dword_stream[2];
     1459 +        input->dword_stream[3]  &= input_mask->dword_stream[3];
     1460 +        input->dword_stream[4]  &= input_mask->dword_stream[4];
     1461 +        input->dword_stream[5]  &= input_mask->dword_stream[5];
     1462 +        input->dword_stream[6]  &= input_mask->dword_stream[6];
     1463 +        input->dword_stream[7]  &= input_mask->dword_stream[7];
     1464 +        input->dword_stream[8]  &= input_mask->dword_stream[8];
     1465 +        input->dword_stream[9]  &= input_mask->dword_stream[9];
     1466 +        input->dword_stream[10] &= input_mask->dword_stream[10];
     1467 +
     1468 +        /* record the flow_vm_vlan bits as they are a key part to the hash */
     1469 +        flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
     1470 +
     1471 +        /* generate common hash dword */
     1472 +        hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
     1473 +                                    input->dword_stream[2] ^
     1474 +                                    input->dword_stream[3] ^
     1475 +                                    input->dword_stream[4] ^
     1476 +                                    input->dword_stream[5] ^
     1477 +                                    input->dword_stream[6] ^
     1478 +                                    input->dword_stream[7] ^
     1479 +                                    input->dword_stream[8] ^
     1480 +                                    input->dword_stream[9] ^
     1481 +                                    input->dword_stream[10]);
     1482 +
     1483 +        /* low dword is word swapped version of common */
     1484 +        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
     1485 +
     1486 +        /* apply flow ID/VM pool/VLAN ID bits to hash words */
     1487 +        hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
     1488 +
     1489 +        /* Process bits 0 and 16 */
     1490 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
     1491 +
     1492 +        /*
     1493 +         * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
     1494 +         * delay this because bit 0 of the stream should not be processed
     1495 +         * so we do not add the vlan until after bit 0 was processed
     1496 +         */
     1497 +        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
     1498 +
     1499 +        /* Process remaining 30 bit of the key */
     1500 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
     1501 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
     1502 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
     1503 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
     1504 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
     1505 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
     1506 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
     1507 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
     1508 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
     1509 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
     1510 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
     1511 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
     1512 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
     1513 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
     1514 +        IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
     1515 +
     1516 +        /*
     1517 +         * Limit hash to 13 bits since max bucket count is 8K.
     1518 +         * Store result at the end of the input stream.
     1519 +         */
     1520 +        input->formatted.bkt_hash = bucket_hash & 0x1FFF;
     1521 +}
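The 0x1FFF mask keeps 13 bits and 2^13 = 8192, matching the 8K maximum bucket count noted in the comment; the stored bkt_hash is consumed as the low bits of FDIRHASH by ixgbe_fdir_write_perfect_filter_82599() further down.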
     1522 +
     1523 +/**
1608 1524   *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1609 1525   *  @input_mask: mask to be bit swapped
1610 1526   *
1611 1527   *  The source and destination port masks for flow director are bit swapped
 1612 1528   *  in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order to
1613 1529   *  generate a correctly swapped value we need to bit swap the mask and that
1614 1530   *  is what is accomplished by this function.
1615 1531   **/
1616      -static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
     1532 +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1617 1533  {
1618      -        u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask);
     1534 +        u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1619 1535          mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1620      -        mask |= IXGBE_NTOHS(input_masks->src_port_mask);
     1536 +        mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1621 1537          mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1622 1538          mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1623 1539          mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1624 1540          return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1625 1541  }
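As a worked example of the swap: a src_port mask of 0xFFF0 (after IXGBE_NTOHS) is returned bit-reversed as 0x0FFF in the low 16 bits, and the dst_port mask is reversed the same way into the upper 16 bits; the caller then inverts the result before writing it to FDIRTCPM and FDIRUDPM.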
1626 1542  
1627 1543  /*
1628 1544   * These two macros are meant to address the fact that we have registers
1629 1545   * that are either all or in part big-endian.  As a result on big-endian
1630 1546   * systems we will end up byte swapping the value to little-endian before
1631 1547   * it is byte swapped again and written to the hardware in the original
  
    [ 1 line elided ]
  
1632 1548   * big-endian format.
1633 1549   */
1634 1550  #define IXGBE_STORE_AS_BE32(_value) \
1635 1551          (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1636 1552           (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1637 1553  
1638 1554  #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1639 1555          IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1640 1556  
1641 1557  #define IXGBE_STORE_AS_BE16(_value) \
1642      -        (((u16)(_value) >> 8) | ((u16)(_value) << 8))
     1558 +        IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
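For instance, IXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211; since IXGBE_WRITE_REG_BE32() runs the value through IXGBE_NTOHL() first, the two byte reversals cancel on a little-endian host and the register receives the same 32-bit pattern the big-endian value already had in memory.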
1643 1559  
1644      -
1645      -/**
1646      - *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1647      - *  @hw: pointer to hardware structure
1648      - *  @input: input bitstream
1649      - *  @input_masks: masks for the input bitstream
1650      - *  @soft_id: software index for the filters
1651      - *  @queue: queue index to direct traffic to
1652      - *
1653      - *  Note that the caller to this function must lock before calling, since the
1654      - *  hardware writes must be protected from one another.
1655      - **/
1656      -s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1657      -                                      union ixgbe_atr_input *input,
1658      -                                      struct ixgbe_atr_input_masks *input_masks,
1659      -                                      u16 soft_id, u8 queue)
     1560 +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
     1561 +                                    union ixgbe_atr_input *input_mask)
1660 1562  {
1661      -        u32 fdirhash;
1662      -        u32 fdircmd;
1663      -        u32 fdirport, fdirtcpm;
1664      -        u32 fdirvlan;
1665      -        /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1666      -        u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1667      -                    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
     1563 +        /* mask IPv6 since it is currently not supported */
     1564 +        u32 fdirm = IXGBE_FDIRM_DIPv6;
     1565 +        u32 fdirtcpm;
1668 1566  
1669      -        DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
     1567 +        DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1670 1568  
1671 1569          /*
1672      -         * Check flow_type formatting, and bail out before we touch the hardware
1673      -         * if there's a configuration issue
1674      -         */
1675      -        switch (input->formatted.flow_type) {
1676      -        case IXGBE_ATR_FLOW_TYPE_IPV4:
1677      -                /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1678      -                fdirm |= IXGBE_FDIRM_L4P;
1679      -                /* FALLTHRU */
1680      -        case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1681      -                if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1682      -                        DEBUGOUT(" Error on src/dst port mask\n");
1683      -                        return IXGBE_ERR_CONFIG;
1684      -                }
1685      -                break;
1686      -        case IXGBE_ATR_FLOW_TYPE_TCPV4:
1687      -                break;
1688      -        case IXGBE_ATR_FLOW_TYPE_UDPV4:
1689      -                break;
1690      -        default:
1691      -                DEBUGOUT(" Error on flow type input\n");
1692      -                return IXGBE_ERR_CONFIG;
1693      -        }
1694      -
1695      -        /*
1696 1570           * Program the relevant mask registers.  If src/dst_port or src/dst_addr
1697 1571           * are zero, then assume a full mask for that field.  Also assume that
1698 1572           * a VLAN of 0 is unspecified, so mask that out as well.  L4type
1699 1573           * cannot be masked out in this implementation.
1700 1574           *
1701 1575           * This also assumes IPv4 only.  IPv6 masking isn't supported at this
1702 1576           * point in time.
1703 1577           */
1704 1578  
1705      -        /* Program FDIRM */
1706      -        switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) {
1707      -        case 0xEFFF:
1708      -                /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
1709      -                fdirm &= ~IXGBE_FDIRM_VLANID;
1710      -                /* FALLTHRU */
1711      -        case 0xE000:
1712      -                /* Unmask VLAN prio - bit 1 */
1713      -                fdirm &= ~IXGBE_FDIRM_VLANP;
     1579 +        /* verify bucket hash is cleared on hash generation */
     1580 +        if (input_mask->formatted.bkt_hash)
     1581 +                DEBUGOUT(" bucket hash should always be 0 in mask\n");
     1582 +
     1583 +        /* Program FDIRM and verify partial masks */
     1584 +        switch (input_mask->formatted.vm_pool & 0x7F) {
     1585 +        case 0x0:
     1586 +                fdirm |= IXGBE_FDIRM_POOL;
     1587 +        case 0x7F:
1714 1588                  break;
1715      -        case 0x0FFF:
1716      -                /* Unmask VLAN ID - bit 0 */
1717      -                fdirm &= ~IXGBE_FDIRM_VLANID;
     1589 +        default:
     1590 +                DEBUGOUT(" Error on vm pool mask\n");
     1591 +                return IXGBE_ERR_CONFIG;
     1592 +        }
     1593 +
     1594 +        switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
     1595 +        case 0x0:
     1596 +                fdirm |= IXGBE_FDIRM_L4P;
     1597 +                if (input_mask->formatted.dst_port ||
     1598 +                    input_mask->formatted.src_port) {
     1599 +                        DEBUGOUT(" Error on src/dst port mask\n");
     1600 +                        return IXGBE_ERR_CONFIG;
     1601 +                }
     1602 +        case IXGBE_ATR_L4TYPE_MASK:
1718 1603                  break;
     1604 +        default:
     1605 +                DEBUGOUT(" Error on flow type mask\n");
     1606 +                return IXGBE_ERR_CONFIG;
     1607 +        }
     1608 +
     1609 +        switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1719 1610          case 0x0000:
1720      -                /* do nothing, vlans already masked */
     1611 +                /* mask VLAN ID, fall through to mask VLAN priority */
     1612 +                fdirm |= IXGBE_FDIRM_VLANID;
     1613 +        case 0x0FFF:
     1614 +                /* mask VLAN priority */
     1615 +                fdirm |= IXGBE_FDIRM_VLANP;
1721 1616                  break;
     1617 +        case 0xE000:
     1618 +                /* mask VLAN ID only, fall through */
     1619 +                fdirm |= IXGBE_FDIRM_VLANID;
     1620 +        case 0xEFFF:
     1621 +                /* no VLAN fields masked */
     1622 +                break;
1722 1623          default:
1723 1624                  DEBUGOUT(" Error on VLAN mask\n");
1724 1625                  return IXGBE_ERR_CONFIG;
1725 1626          }
1726 1627  
1727      -        if (input_masks->flex_mask & 0xFFFF) {
1728      -                if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
1729      -                        DEBUGOUT(" Error on flexible byte mask\n");
1730      -                        return IXGBE_ERR_CONFIG;
1731      -                }
1732      -                /* Unmask Flex Bytes - bit 4 */
1733      -                fdirm &= ~IXGBE_FDIRM_FLEX;
     1628 +        switch (input_mask->formatted.flex_bytes & 0xFFFF) {
     1629 +        case 0x0000:
     1630 +                /* Mask Flex Bytes, fall through */
     1631 +                fdirm |= IXGBE_FDIRM_FLEX;
     1632 +        case 0xFFFF:
     1633 +                break;
     1634 +        default:
     1635 +                DEBUGOUT(" Error on flexible byte mask\n");
     1636 +                return IXGBE_ERR_CONFIG;
1734 1637          }
1735 1638  
1736 1639          /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1737 1640          IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1738 1641  
1739 1642          /* store the TCP/UDP port masks, bit reversed from port layout */
1740      -        fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
     1643 +        fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1741 1644  
1742 1645          /* write both the same so that UDP and TCP use the same mask */
1743 1646          IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1744 1647          IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1745 1648  
 1746 1649          /* store source and destination IP masks (big-endian) */
1747 1650          IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1748      -                             ~input_masks->src_ip_mask[0]);
     1651 +                             ~input_mask->formatted.src_ip[0]);
1749 1652          IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1750      -                             ~input_masks->dst_ip_mask[0]);
     1653 +                             ~input_mask->formatted.dst_ip[0]);
1751 1654  
1752      -        /* Apply masks to input data */
1753      -        input->formatted.vlan_id &= input_masks->vlan_id_mask;
1754      -        input->formatted.flex_bytes &= input_masks->flex_mask;
1755      -        input->formatted.src_port &= input_masks->src_port_mask;
1756      -        input->formatted.dst_port &= input_masks->dst_port_mask;
1757      -        input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
1758      -        input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
     1655 +        return IXGBE_SUCCESS;
     1656 +}
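In short: for the VM pool, L4 type, VLAN and flex-byte fields, a zero in input_mask means the field is ignored (its FDIRM bit is set) and a fully-set value means it must match (the VLAN case additionally allows masking the ID and priority independently), with other partial masks rejected as IXGBE_ERR_CONFIG; the port and IPv4 address masks, by contrast, are passed through inverted to FDIRTCPM/FDIRUDPM and FDIRSIP4M/FDIRDIP4M and may therefore be arbitrary.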
1759 1657  
1760      -        /* record vlan (little-endian) and flex_bytes(big-endian) */
1761      -        fdirvlan =
1762      -                IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes));
1763      -        fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1764      -        fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1765      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
     1658 +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
     1659 +                                          union ixgbe_atr_input *input,
     1660 +                                          u16 soft_id, u8 queue)
     1661 +{
     1662 +        u32 fdirport, fdirvlan, fdirhash, fdircmd;
1766 1663  
     1664 +        DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
     1665 +
     1666 +        /* currently IPv6 is not supported, must be programmed with 0 */
     1667 +        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
     1668 +                             input->formatted.src_ip[0]);
     1669 +        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
     1670 +                             input->formatted.src_ip[1]);
     1671 +        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
     1672 +                             input->formatted.src_ip[2]);
     1673 +
     1674 +        /* record the source address (big-endian) */
     1675 +        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
     1676 +
     1677 +        /* record the first 32 bits of the destination address (big-endian) */
     1678 +        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
     1679 +
1767 1680          /* record source and destination port (little-endian)*/
1768 1681          fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1769 1682          fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1770 1683          fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1771 1684          IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1772 1685  
1773      -        /* record the first 32 bits of the destination address (big-endian) */
1774      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
     1686 +        /* record vlan (little-endian) and flex_bytes(big-endian) */
     1687 +        fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
     1688 +        fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
     1689 +        fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
     1690 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1775 1691  
1776      -        /* record the source address (big-endian) */
1777      -        IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
     1692 +        /* configure FDIRHASH register */
     1693 +        fdirhash = input->formatted.bkt_hash;
     1694 +        fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
     1695 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1778 1696  
     1697 +        /*
     1698 +         * flush all previous writes to make certain registers are
     1699 +         * programmed prior to issuing the command
     1700 +         */
     1701 +        IXGBE_WRITE_FLUSH(hw);
     1702 +
1779 1703          /* configure FDIRCMD register */
1780 1704          fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1781 1705                    IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
     1706 +        if (queue == IXGBE_FDIR_DROP_QUEUE)
     1707 +                fdircmd |= IXGBE_FDIRCMD_DROP;
1782 1708          fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1783 1709          fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
     1710 +        fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1784 1711  
1785      -        /* we only want the bucket hash so drop the upper 16 bits */
1786      -        fdirhash = ixgbe_atr_compute_hash_82599(input,
1787      -                                                IXGBE_ATR_BUCKET_HASH_KEY);
1788      -        fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1789      -
1790      -        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1791 1712          IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1792 1713  
1793 1714          return IXGBE_SUCCESS;
1794 1715  }
1795 1716  
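The FDIRPORT and FDIRVLAN writes in the function above each pack two 16-bit fields into one 32-bit filter register: the destination port (or flex bytes) in the upper half, the source port (or VLAN ID) in the lower half. Below is a minimal standalone sketch of that packing, outside the driver, with byte order simplified to host order; the shift of 16 stands in for IXGBE_FDIRPORT_DESTINATION_SHIFT, and the real code first byte-swaps the network-order ports with IXGBE_NTOHS.

/*
 * Standalone illustration only -- not driver code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
pack_ports(uint16_t src_port, uint16_t dst_port)
{
        uint32_t fdirport;

        fdirport = dst_port;            /* destination port in bits 31:16 */
        fdirport <<= 16;
        fdirport |= src_port;           /* source port in bits 15:0 */

        return (fdirport);
}

int
main(void)
{
        /* example values only */
        (void) printf("FDIRPORT = 0x%08x\n", pack_ports(1024, 80));

        return (0);
}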
     1717 +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
     1718 +                                          union ixgbe_atr_input *input,
     1719 +                                          u16 soft_id)
     1720 +{
     1721 +        u32 fdirhash;
     1722 +        u32 fdircmd = 0;
     1723 +        u32 retry_count;
     1724 +        s32 err = IXGBE_SUCCESS;
     1725 +
     1726 +        /* configure FDIRHASH register */
     1727 +        fdirhash = input->formatted.bkt_hash;
     1728 +        fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
     1729 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
     1730 +
     1731 +        /* flush hash to HW */
     1732 +        IXGBE_WRITE_FLUSH(hw);
     1733 +
     1734 +        /* Query if filter is present */
     1735 +        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
     1736 +
     1737 +        for (retry_count = 10; retry_count; retry_count--) {
     1738 +                /* allow 10us for query to process */
     1739 +                usec_delay(10);
     1740 +                /* verify query completed successfully */
     1741 +                fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
     1742 +                if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
     1743 +                        break;
     1744 +        }
     1745 +
     1746 +        if (!retry_count)
     1747 +                err = IXGBE_ERR_FDIR_REINIT_FAILED;
     1748 +
     1749 +        /* if filter exists in hardware then remove it */
     1750 +        if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
     1751 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
     1752 +                IXGBE_WRITE_FLUSH(hw);
     1753 +                IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
     1754 +                                IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
     1755 +        }
     1756 +
     1757 +        return err;
     1758 +}
     1759 +
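ixgbe_fdir_erase_perfect_filter_82599() above issues a query command and then polls FDIRCMD until the command field clears, giving up after ten 10-microsecond waits. The following is a self-contained sketch of that bounded-poll pattern with the register read simulated; read_reg() and CMD_MASK are stand-ins, not driver symbols.

#include <stdint.h>
#include <stdio.h>

#define CMD_MASK        0x3u    /* stand-in for IXGBE_FDIRCMD_CMD_MASK */

/* simulated FDIRCMD read: pretends the command completes on the third poll */
static uint32_t
read_reg(void)
{
        static int reads;

        return (++reads < 3 ? CMD_MASK : 0);
}

/* returns 0 once the command field clears, -1 if the retry budget runs out */
static int
wait_for_command_clear(uint32_t *last)
{
        unsigned int retry;

        for (retry = 10; retry != 0; retry--) {
                /* the real code delays 10us between reads (usec_delay(10)) */
                *last = read_reg();
                if ((*last & CMD_MASK) == 0)
                        return (0);
        }

        return (-1);
}

int
main(void)
{
        uint32_t fdircmd = 0;

        (void) printf("poll result: %d\n", wait_for_command_clear(&fdircmd));

        return (0);
}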
1796 1760  /**
     1761 + *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
     1762 + *  @hw: pointer to hardware structure
     1763 + *  @input: input bitstream
     1764 + *  @input_mask: mask for the input bitstream
     1765 + *  @soft_id: software index for the filters
     1766 + *  @queue: queue index to direct traffic to
     1767 + *
     1768 + *  Note that the caller to this function must lock before calling, since the
     1769 + *  hardware writes must be protected from one another.
     1770 + **/
     1771 +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
     1772 +                                        union ixgbe_atr_input *input,
     1773 +                                        union ixgbe_atr_input *input_mask,
     1774 +                                        u16 soft_id, u8 queue)
     1775 +{
     1776 +        s32 err = IXGBE_ERR_CONFIG;
     1777 +
     1778 +        DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
     1779 +
     1780 +        /*
     1781 +         * Check flow_type formatting, and bail out before we touch the hardware
     1782 +         * if there's a configuration issue
     1783 +         */
     1784 +        switch (input->formatted.flow_type) {
     1785 +        case IXGBE_ATR_FLOW_TYPE_IPV4:
     1786 +                input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
     1787 +                if (input->formatted.dst_port || input->formatted.src_port) {
     1788 +                        DEBUGOUT(" Error on src/dst port\n");
     1789 +                        return IXGBE_ERR_CONFIG;
     1790 +                }
     1791 +                break;
     1792 +        case IXGBE_ATR_FLOW_TYPE_SCTPV4:
     1793 +                if (input->formatted.dst_port || input->formatted.src_port) {
     1794 +                        DEBUGOUT(" Error on src/dst port\n");
     1795 +                        return IXGBE_ERR_CONFIG;
     1796 +                }
     1797 +        case IXGBE_ATR_FLOW_TYPE_TCPV4:
     1798 +        case IXGBE_ATR_FLOW_TYPE_UDPV4:
     1799 +                input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
     1800 +                                                  IXGBE_ATR_L4TYPE_MASK;
     1801 +                break;
     1802 +        default:
     1803 +                DEBUGOUT(" Error on flow type input\n");
     1804 +                return err;
     1805 +        }
     1806 +
     1807 +        /* program input mask into the HW */
     1808 +        err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
     1809 +        if (err)
     1810 +                return err;
     1811 +
     1812 +        /* apply mask and compute/store hash */
     1813 +        ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
     1814 +
     1815 +        /* program filters to filter memory */
     1816 +        return ixgbe_fdir_write_perfect_filter_82599(hw, input,
     1817 +                                                     soft_id, queue);
     1818 +}
     1819 +
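A hedged caller sketch for the new ixgbe_fdir_add_perfect_filter_82599() entry point. It assumes the ixgbe driver build environment (struct ixgbe_hw already initialized, ixgbe_82599.h declaring the filter routines, memset or bzero available); the port values, soft_id, and queue are illustrative only, and the caller must hold its own lock around the call as the block comment above requires.

#include "ixgbe_type.h"
#include "ixgbe_82599.h"  /* assumed to declare the perfect-filter routines */

/* hypothetical helper: add an exact-match TCPv4 port filter on one Rx queue */
static s32
example_add_tcp_filter(struct ixgbe_hw *hw, u8 rx_queue)
{
        union ixgbe_atr_input input, mask;

        (void) memset(&input, 0, sizeof (input));
        (void) memset(&mask, 0, sizeof (mask));

        input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
        input.formatted.src_port = 1024;        /* example only; real callers */
        input.formatted.dst_port = 80;          /* store ports in network order */

        /* match both L4 ports exactly; zeroed mask fields stay wildcarded */
        mask.formatted.src_port = 0xFFFF;
        mask.formatted.dst_port = 0xFFFF;

        /* caller must serialize: the hardware writes must not interleave */
        return (ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
            1 /* soft_id */, rx_queue));
}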
     1820 +/**
1797 1821   *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1798 1822   *  @hw: pointer to hardware structure
1799 1823   *  @reg: analog register to read
1800 1824   *  @val: read value
1801 1825   *
1802 1826   *  Performs read operation to Omer analog register specified.
1803 1827   **/
1804 1828  s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1805 1829  {
1806 1830          u32  core_ctl;
1807 1831  
1808 1832          DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1809 1833  
1810 1834          IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1811      -                        (reg << 8));
     1835 +                        (reg << 8));
1812 1836          IXGBE_WRITE_FLUSH(hw);
1813 1837          usec_delay(10);
1814 1838          core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1815 1839          *val = (u8)core_ctl;
1816 1840  
1817 1841          return IXGBE_SUCCESS;
1818 1842  }
1819 1843  
1820 1844  /**
1821 1845   *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1822 1846   *  @hw: pointer to hardware structure
1823 1847   *  @reg: atlas register to write
1824 1848   *  @val: value to write
1825 1849   *
1826 1850   *  Performs write operation to Omer analog register specified.
1827 1851   **/
1828 1852  s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1829 1853  {
1830 1854          u32  core_ctl;
1831 1855  
1832 1856          DEBUGFUNC("ixgbe_write_analog_reg8_82599");
  
1833 1857  
1834 1858          core_ctl = (reg << 8) | val;
1835 1859          IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1836 1860          IXGBE_WRITE_FLUSH(hw);
1837 1861          usec_delay(10);
1838 1862  
1839 1863          return IXGBE_SUCCESS;
1840 1864  }
1841 1865  
1842 1866  /**
1843      - *  ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
     1867 + *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1844 1868   *  @hw: pointer to hardware structure
1845 1869   *
1846 1870   *  Starts the hardware using the generic start_hw function
1847 1871   *  and the generation start_hw function.
1848 1872   *  Then performs revision-specific operations, if any.
1849 1873   **/
1850      -s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
     1874 +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1851 1875  {
1852 1876          s32 ret_val = IXGBE_SUCCESS;
1853 1877  
1854      -        DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
     1878 +        DEBUGFUNC("ixgbe_start_hw_82599");
1855 1879  
1856 1880          ret_val = ixgbe_start_hw_generic(hw);
1857 1881          if (ret_val != IXGBE_SUCCESS)
1858 1882                  goto out;
1859 1883  
1860 1884          ret_val = ixgbe_start_hw_gen2(hw);
1861 1885          if (ret_val != IXGBE_SUCCESS)
1862 1886                  goto out;
1863 1887  
1864 1888          /* We need to run link autotry after the driver loads */
1865 1889          hw->mac.autotry_restart = TRUE;
1866 1890  
1867 1891          if (ret_val == IXGBE_SUCCESS)
1868 1892                  ret_val = ixgbe_verify_fw_version_82599(hw);
1869 1893  out:
1870 1894          return ret_val;
1871 1895  }
1872 1896  
1873 1897  /**
1874 1898   *  ixgbe_identify_phy_82599 - Get physical layer module
1875 1899   *  @hw: pointer to hardware structure
1876 1900   *
1877 1901   *  Determines the physical layer module found on the current adapter.
1878 1902   *  If PHY already detected, maintains current PHY type in hw struct,
1879 1903   *  otherwise executes the PHY detection routine.
1880 1904   **/
1881 1905  s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1882 1906  {
1883 1907          s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
  
1884 1908  
1885 1909          DEBUGFUNC("ixgbe_identify_phy_82599");
1886 1910  
1887 1911          /* Detect PHY if not unknown - returns success if already detected. */
1888 1912          status = ixgbe_identify_phy_generic(hw);
1889 1913          if (status != IXGBE_SUCCESS) {
1890 1914                  /* 82599 10GBASE-T requires an external PHY */
1891 1915                  if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1892 1916                          goto out;
1893 1917                  else
1894      -                        status = ixgbe_identify_sfp_module_generic(hw);
     1918 +                        status = ixgbe_identify_module_generic(hw);
1895 1919          }
1896 1920  
1897 1921          /* Set PHY type none if no PHY detected */
1898 1922          if (hw->phy.type == ixgbe_phy_unknown) {
1899 1923                  hw->phy.type = ixgbe_phy_none;
1900 1924                  status = IXGBE_SUCCESS;
1901 1925          }
1902 1926  
1903 1927          /* Return error if SFP module has been detected but is not supported */
1904 1928          if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1905 1929                  status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1906 1930  
1907 1931  out:
1908 1932          return status;
1909 1933  }
1910 1934  
1911 1935  /**
1912 1936   *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1913 1937   *  @hw: pointer to hardware structure
1914 1938   *
1915 1939   *  Determines physical layer capabilities of the current configuration.
1916 1940   **/
1917 1941  u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1918 1942  {
1919 1943          u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1920 1944          u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1921 1945          u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1922 1946          u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1923 1947          u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1924 1948          u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
  
1925 1949          u16 ext_ability = 0;
1926 1950          u8 comp_codes_10g = 0;
1927 1951          u8 comp_codes_1g = 0;
1928 1952  
1929 1953          DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
1930 1954  
1931 1955          hw->phy.ops.identify(hw);
1932 1956  
1933 1957          switch (hw->phy.type) {
1934 1958          case ixgbe_phy_tn:
1935      -        case ixgbe_phy_aq:
1936 1959          case ixgbe_phy_cu_unknown:
1937 1960                  hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1938 1961                  IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1939 1962                  if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1940 1963                          physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1941 1964                  if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1942 1965                          physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1943 1966                  if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1944 1967                          physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1945 1968                  goto out;
1946 1969          default:
1947 1970                  break;
1948 1971          }
1949 1972  
1950 1973          switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1951 1974          case IXGBE_AUTOC_LMS_1G_AN:
1952 1975          case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1953 1976                  if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1954 1977                          physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1955 1978                              IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1956 1979                          goto out;
1957 1980                  }
1958 1981                  /* SFI mode so read SFP module */
1959 1982                  goto sfp_check;
1960 1983          case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1961 1984                  if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
1962 1985                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1963 1986                  else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
1964 1987                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1965 1988                  else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
1966 1989                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
1967 1990                  goto out;
1968 1991          case IXGBE_AUTOC_LMS_10G_SERIAL:
1969 1992                  if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
1970 1993                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1971 1994                          goto out;
1972 1995                  } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
1973 1996                          goto sfp_check;
1974 1997                  break;
1975 1998          case IXGBE_AUTOC_LMS_KX4_KX_KR:
1976 1999          case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
1977 2000                  if (autoc & IXGBE_AUTOC_KX_SUPP)
1978 2001                          physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1979 2002                  if (autoc & IXGBE_AUTOC_KX4_SUPP)
1980 2003                          physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1981 2004                  if (autoc & IXGBE_AUTOC_KR_SUPP)
1982 2005                          physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
1983 2006                  goto out;
1984 2007          default:
1985 2008                  goto out;
1986 2009          }
1987 2010  
1988 2011  sfp_check:
1989 2012          /* SFP check must be done last since DA modules are sometimes used to
1990 2013           * test KR mode -  we need to id KR mode correctly before SFP module.
1991 2014           * Call identify_sfp because the pluggable module may have changed */
1992 2015          hw->phy.ops.identify_sfp(hw);
1993 2016          if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1994 2017                  goto out;
1995 2018  
1996 2019          switch (hw->phy.type) {
1997 2020          case ixgbe_phy_sfp_passive_tyco:
1998 2021          case ixgbe_phy_sfp_passive_unknown:
1999 2022                  physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2000 2023                  break;
2001 2024          case ixgbe_phy_sfp_ftl_active:
2002 2025          case ixgbe_phy_sfp_active_unknown:
2003 2026                  physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2004 2027                  break;
2005 2028          case ixgbe_phy_sfp_avago:
2006 2029          case ixgbe_phy_sfp_ftl:
2007 2030          case ixgbe_phy_sfp_intel:
2008 2031          case ixgbe_phy_sfp_unknown:
  
2009 2032                  hw->phy.ops.read_i2c_eeprom(hw,
2010 2033                        IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2011 2034                  hw->phy.ops.read_i2c_eeprom(hw,
2012 2035                        IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2013 2036                  if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2014 2037                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2015 2038                  else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2016 2039                          physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2017 2040                  else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2018 2041                          physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
     2042 +                else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
     2043 +                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2019 2044                  break;
2020 2045          default:
2021 2046                  break;
2022 2047          }
2023 2048  
2024 2049  out:
2025 2050          return physical_layer;
2026 2051  }
2027 2052  
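The tail of ixgbe_get_supported_physical_layer_82599() decodes the module's 10G and 1G compliance-code bytes most-capable-first, and this change extends the chain with a 1000BASE-SX check. Below is a standalone sketch of that decode; the bit values are made up for illustration, not the real IXGBE_SFF_* constants.

#include <stdint.h>
#include <stdio.h>

#define SR_CAPABLE      0x10u   /* stand-in for IXGBE_SFF_10GBASESR_CAPABLE */
#define LR_CAPABLE      0x20u   /* stand-in for IXGBE_SFF_10GBASELR_CAPABLE */
#define T_CAPABLE       0x08u   /* stand-in for IXGBE_SFF_1GBASET_CAPABLE */
#define SX_CAPABLE      0x01u   /* stand-in for IXGBE_SFF_1GBASESX_CAPABLE */

/* decode the compliance bytes most-capable-first, as the driver does */
static const char *
decode_sfp(uint8_t comp_10g, uint8_t comp_1g)
{
        if (comp_10g & SR_CAPABLE)
                return ("10GBASE-SR");
        if (comp_10g & LR_CAPABLE)
                return ("10GBASE-LR");
        if (comp_1g & T_CAPABLE)
                return ("1000BASE-T");
        if (comp_1g & SX_CAPABLE)
                return ("1000BASE-SX"); /* case newly handled by this change */

        return ("unknown");
}

int
main(void)
{
        (void) printf("%s\n", decode_sfp(0x00, SX_CAPABLE));

        return (0);
}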
2028 2053  /**
2029 2054   *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2030 2055   *  @hw: pointer to hardware structure
2031 2056   *  @regval: register value to write to RXCTRL
2032 2057   *
2033 2058   *  Enables the Rx DMA unit for 82599
2034 2059   **/
2035 2060  s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2036 2061  {
2037      -#define IXGBE_MAX_SECRX_POLL 30
2038      -        int i;
2039      -        int secrxreg;
2040 2062  
2041 2063          DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2042 2064  
2043 2065          /*
2044 2066           * Workaround for 82599 silicon errata when enabling the Rx datapath.
2045 2067           * If traffic is incoming before we enable the Rx unit, it could hang
2046 2068           * the Rx DMA unit.  Therefore, make sure the security engine is
2047 2069           * completely disabled prior to enabling the Rx unit.
2048 2070           */
2049      -        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2050      -        secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2051      -        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2052      -        for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2053      -                secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2054      -                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2055      -                        break;
2056      -                else
2057      -                        /* Use interrupt-safe sleep just in case */
2058      -                        usec_delay(10);
2059      -        }
2060 2071  
2061      -        /* For informational purposes only */
2062      -        if (i >= IXGBE_MAX_SECRX_POLL)
2063      -                DEBUGOUT("Rx unit being enabled before security "
2064      -                         "path fully disabled.  Continuing with init.\n");
     2072 +        hw->mac.ops.disable_sec_rx_path(hw);
2065 2073  
2066 2074          IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2067      -        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2068      -        secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2069      -        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2070      -        IXGBE_WRITE_FLUSH(hw);
2071 2075  
     2076 +        hw->mac.ops.enable_sec_rx_path(hw);
     2077 +
2072 2078          return IXGBE_SUCCESS;
2073 2079  }
2074 2080  
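The inline code deleted above is what the new hw->mac.ops.disable_sec_rx_path / enable_sec_rx_path callbacks are expected to cover: block the security Rx path, poll until it reports ready, program RXCTRL, then unblock. A condensed standalone sketch of that ordering follows, with the registers simulated as plain variables and the bit values as stand-ins.

#include <stdint.h>
#include <stdio.h>

#define SECRX_DIS       0x2u    /* stand-in for IXGBE_SECRXCTRL_RX_DIS */
#define SECRX_RDY       0x1u    /* stand-in for IXGBE_SECRXSTAT_SECRX_RDY */
#define MAX_SECRX_POLL  30      /* matches IXGBE_MAX_SECRX_POLL above */

/* simulated registers; the real code reads and writes MMIO */
static uint32_t secrxctrl, secrxstat = SECRX_RDY, rxctrl;

static void
enable_rx_dma_sketch(uint32_t regval)
{
        int i;

        secrxctrl |= SECRX_DIS;         /* 1. block the security Rx path */
        for (i = 0; i < MAX_SECRX_POLL; i++) {
                if (secrxstat & SECRX_RDY)
                        break;          /* 2. path reports ready (quiesced) */
                /* the real code sleeps 10us between polls */
        }

        rxctrl = regval;                /* 3. now safe to enable Rx DMA */
        secrxctrl &= ~SECRX_DIS;        /* 4. re-enable the security path */
}

int
main(void)
{
        enable_rx_dma_sketch(1);
        (void) printf("RXCTRL = 0x%x, SECRXCTRL = 0x%x\n", rxctrl, secrxctrl);

        return (0);
}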
2075 2081  /**
2076 2082   *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
2077 2083   *  @hw: pointer to hardware structure
2078 2084   *
2079 2085   *  Verifies that installed the firmware version is 0.6 or higher
2080 2086   *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2081 2087   *
2082 2088   *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2083 2089   *  if the FW version is not supported.
2084 2090   **/
2085 2091  static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2086 2092  {
2087 2093          s32 status = IXGBE_ERR_EEPROM_VERSION;
2088 2094          u16 fw_offset, fw_ptp_cfg_offset;
2089 2095          u16 fw_version = 0;
2090 2096  
2091 2097          DEBUGFUNC("ixgbe_verify_fw_version_82599");
2092 2098  
2093 2099          /* firmware check is only necessary for SFI devices */
2094 2100          if (hw->phy.media_type != ixgbe_media_type_fiber) {
2095 2101                  status = IXGBE_SUCCESS;
2096 2102                  goto fw_version_out;
  
2097 2103          }
2098 2104  
2099 2105          /* get the offset to the Firmware Module block */
2100 2106          hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2101 2107  
2102 2108          if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2103 2109                  goto fw_version_out;
2104 2110  
2105 2111          /* get the offset to the Pass Through Patch Configuration block */
2106 2112          hw->eeprom.ops.read(hw, (fw_offset +
2107      -                                 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2108      -                                 &fw_ptp_cfg_offset);
     2113 +                                 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
     2114 +                                 &fw_ptp_cfg_offset);
2109 2115  
2110 2116          if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2111 2117                  goto fw_version_out;
2112 2118  
2113 2119          /* get the firmware version */
2114 2120          hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2115      -                                 IXGBE_FW_PATCH_VERSION_4),
2116      -                                 &fw_version);
     2121 +                            IXGBE_FW_PATCH_VERSION_4), &fw_version);
2117 2122  
2118 2123          if (fw_version > 0x5)
2119 2124                  status = IXGBE_SUCCESS;
2120 2125  
2121 2126  fw_version_out:
2122 2127          return status;
2123 2128  }
2124 2129  
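ixgbe_verify_fw_version_82599() walks a pointer chain through the EEPROM image, treating 0x0000 and 0xFFFF as "block not present" at every hop before comparing the version word against 0x5. Here is a self-contained sketch of that pattern over a made-up in-memory image; the offsets and contents below are illustrative, not the real EEPROM layout.

#include <stdint.h>
#include <stdio.h>

#define EE_WORDS        16
#define NOT_PRESENT(w)  ((w) == 0x0000 || (w) == 0xFFFF)

/* made-up EEPROM image; real offsets come from IXGBE_FW_PTR and friends */
static const uint16_t eeprom[EE_WORDS] = {
        [0x0F] = 0x0004,        /* firmware module pointer */
        [0x04] = 0x0008,        /* patch-configuration pointer */
        [0x08] = 0x0007,        /* firmware patch version word */
};

int
main(void)
{
        uint16_t fw_offset = eeprom[0x0F];
        uint16_t cfg_offset, version;

        if (NOT_PRESENT(fw_offset))
                return (1);             /* no firmware module block */
        cfg_offset = eeprom[fw_offset];
        if (NOT_PRESENT(cfg_offset))
                return (1);             /* no patch-configuration block */
        version = eeprom[cfg_offset];

        (void) printf("fw version word 0x%x: %ssupported\n",
            (unsigned int)version, version > 0x5 ? "" : "not ");

        return (0);
}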
2125 2130  /**
2126 2131   *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2127 2132   *  @hw: pointer to hardware structure
2128 2133   *
2129 2134   *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
2130 2135   *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2131 2136   **/
2132 2137  bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2133 2138  {
2134 2139          bool lesm_enabled = FALSE;
2135 2140          u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2136 2141          s32 status;
2137 2142  
2138 2143          DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
  
2139 2144  
2140 2145          /* get the offset to the Firmware Module block */
2141 2146          status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2142 2147  
2143 2148          if ((status != IXGBE_SUCCESS) ||
2144 2149              (fw_offset == 0) || (fw_offset == 0xFFFF))
2145 2150                  goto out;
2146 2151  
2147 2152          /* get the offset to the LESM Parameters block */
2148 2153          status = hw->eeprom.ops.read(hw, (fw_offset +
2149      -                                 IXGBE_FW_LESM_PARAMETERS_PTR),
2150      -                                 &fw_lesm_param_offset);
     2154 +                                     IXGBE_FW_LESM_PARAMETERS_PTR),
     2155 +                                     &fw_lesm_param_offset);
2151 2156  
2152 2157          if ((status != IXGBE_SUCCESS) ||
2153 2158              (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2154 2159                  goto out;
2155 2160  
2156 2161          /* get the lesm state word */
2157 2162          status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2158      -                                     IXGBE_FW_LESM_STATE_1),
2159      -                                     &fw_lesm_state);
     2163 +                                     IXGBE_FW_LESM_STATE_1),
     2164 +                                     &fw_lesm_state);
2160 2165  
2161 2166          if ((status == IXGBE_SUCCESS) &&
2162 2167              (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2163 2168                  lesm_enabled = TRUE;
2164 2169  
2165 2170  out:
2166 2171          return lesm_enabled;
2167 2172  }
2168 2173  
     2174 +/**
     2175 + *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
     2176 + *  fastest available method
     2177 + *
     2178 + *  @hw: pointer to hardware structure
     2179 + *  @offset: offset of  word in EEPROM to read
     2180 + *  @words: number of words
     2181 + *  @data: word(s) read from the EEPROM
     2182 + *
     2183 + *  Retrieves 16 bit word(s) read from EEPROM
     2184 + **/
     2185 +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
     2186 +                                          u16 words, u16 *data)
     2187 +{
     2188 +        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
     2189 +        s32 ret_val = IXGBE_ERR_CONFIG;
     2190 +
     2191 +        DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
     2192 +
     2193 +        /*
     2194 +         * If EEPROM is detected and can be addressed using 14 bits,
     2195 +         * use EERD otherwise use bit bang
     2196 +         */
     2197 +        if ((eeprom->type == ixgbe_eeprom_spi) &&
     2198 +            (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
     2199 +                ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
     2200 +                                                         data);
     2201 +        else
     2202 +                ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
     2203 +                                                                    words,
     2204 +                                                                    data);
     2205 +
     2206 +        return ret_val;
     2207 +}
     2208 +
     2209 +/**
     2210 + *  ixgbe_read_eeprom_82599 - Read EEPROM word using
     2211 + *  fastest available method
     2212 + *
     2213 + *  @hw: pointer to hardware structure
     2214 + *  @offset: offset of  word in the EEPROM to read
     2215 + *  @data: word read from the EEPROM
     2216 + *
     2217 + *  Reads a 16 bit word from the EEPROM
     2218 + **/
     2219 +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
     2220 +                                   u16 offset, u16 *data)
     2221 +{
     2222 +        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
     2223 +        s32 ret_val = IXGBE_ERR_CONFIG;
     2224 +
     2225 +        DEBUGFUNC("ixgbe_read_eeprom_82599");
     2226 +
     2227 +        /*
     2228 +         * If EEPROM is detected and can be addressed using 14 bits,
     2229 +         * use EERD otherwise use bit bang
     2230 +         */
     2231 +        if ((eeprom->type == ixgbe_eeprom_spi) &&
     2232 +            (offset <= IXGBE_EERD_MAX_ADDR))
     2233 +                ret_val = ixgbe_read_eerd_generic(hw, offset, data);
     2234 +        else
     2235 +                ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
     2236 +
     2237 +        return ret_val;
     2238 +}
     2239 +
2169 2240  
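A hedged usage sketch for the two EEPROM read routines added above. Callers are expected to go through the eeprom ops table rather than calling the static functions directly; this assumes ixgbe_init_ops_82599() installs them as hw->eeprom.ops.read and hw->eeprom.ops.read_buffer (as the hw->eeprom.ops.read(hw, IXGBE_FW_PTR, ...) calls earlier in this file suggest) and that the driver build environment is available.

#include "ixgbe_type.h"

/* hypothetical wrappers, driver build environment assumed */
static s32
example_read_fw_pointer(struct ixgbe_hw *hw, u16 *fw_offset)
{
        /* single word: dispatches to EERD or bit-bang as shown above */
        return (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, fw_offset));
}

static s32
example_read_block(struct ixgbe_hw *hw, u16 offset, u16 nwords, u16 *buf)
{
        /* multi-word variant backed by ixgbe_read_eeprom_buffer_82599() */
        return (hw->eeprom.ops.read_buffer(hw, offset, nwords, buf));
}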
    