3014 Intel X540 Support


   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  28  */
  29 
  30 #include "ixgbe_sw.h"
  31 
  32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
  33 static char ixgbe_version[] = "ixgbe 1.1.7";
  34 
  35 /*
  36  * Local function prototypes
  37  */
  38 static int ixgbe_register_mac(ixgbe_t *);
  39 static int ixgbe_identify_hardware(ixgbe_t *);
  40 static int ixgbe_regs_map(ixgbe_t *);
  41 static void ixgbe_init_properties(ixgbe_t *);
  42 static int ixgbe_init_driver_settings(ixgbe_t *);
  43 static void ixgbe_init_locks(ixgbe_t *);
  44 static void ixgbe_destroy_locks(ixgbe_t *);
  45 static int ixgbe_init(ixgbe_t *);
  46 static int ixgbe_chip_start(ixgbe_t *);
  47 static void ixgbe_chip_stop(ixgbe_t *);


 274         0xFF8,          /* maximum interrupt throttle rate */
 275         0,              /* minimum interrupt throttle rate */
 276         200,            /* default interrupt throttle rate */
 277         64,             /* maximum total msix vectors */
 278         16,             /* maximum number of ring vectors */
 279         2,              /* maximum number of other vectors */
 280         (IXGBE_EICR_LSC
 281         | IXGBE_EICR_GPI_SDP1
 282         | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
 283 
 284         (IXGBE_SDP1_GPIEN
 285         | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
 286 
 287         (IXGBE_FLAG_DCA_CAPABLE
 288         | IXGBE_FLAG_RSS_CAPABLE
 289         | IXGBE_FLAG_VMDQ_CAPABLE
 290         | IXGBE_FLAG_RSC_CAPABLE
 291         | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
 292 };
 293 
 294 /*
 295  * Module Initialization Functions.
 296  */
 297 
 298 int
 299 _init(void)
 300 {
 301         int status;
 302 
 303         mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
 304 
 305         status = mod_install(&ixgbe_modlinkage);
 306 
 307         if (status != DDI_SUCCESS) {
 308                 mac_fini_ops(&ixgbe_dev_ops);
 309         }
 310 
 311         return (status);
 312 }
 313 
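For reference, the matching _fini entry point falls outside this hunk; the conventional teardown mirrors _init in reverse. A minimal sketch, assuming the same ixgbe_modlinkage and ixgbe_dev_ops used above:

int
_fini(void)
{
        int status;

        status = mod_remove(&ixgbe_modlinkage);

        if (status == DDI_SUCCESS) {
                mac_fini_ops(&ixgbe_dev_ops);
        }

        return (status);
}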


 850                 ixgbe->capab = &ixgbe_82598eb_cap;
 851 
 852                 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
 853                         ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
 854                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
 855                         ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
 856                 }
 857                 break;
 858 
 859         case ixgbe_mac_82599EB:
 860                 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
 861                 ixgbe->capab = &ixgbe_82599eb_cap;
 862 
 863                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
 864                         ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
 865                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
 866                         ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
 867                 }
 868                 break;
 869 
 870         default:
 871                 IXGBE_DEBUGLOG_1(ixgbe,
 872                     "adapter not supported in ixgbe_identify_hardware(): %d\n",
 873                     hw->mac.type);
 874                 return (IXGBE_FAILURE);
 875         }
 876 
 877         return (IXGBE_SUCCESS);
 878 }
 879 
 880 /*
 881  * ixgbe_regs_map - Map the device registers.
 882  *
 883  */
 884 static int
 885 ixgbe_regs_map(ixgbe_t *ixgbe)
 886 {
 887         dev_info_t *devinfo = ixgbe->dip;
 888         struct ixgbe_hw *hw = &ixgbe->hw;
 889         struct ixgbe_osdep *osdep = &ixgbe->osdep;


1183          */
1184         if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185                 /*
1186                  * Some PCI-E parts fail the first check due to
1187                  * the link being in sleep state.  Call it again,
1188                  * if it fails a second time it's a real issue.
1189                  */
1190                 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191                         ixgbe_error(ixgbe,
1192                             "Invalid NVM checksum. Please contact "
1193                             "the vendor to update the NVM.");
1194                         ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195                         goto init_fail;
1196                 }
1197         }
1198 
1199         /*
1200          * Setup default flow control thresholds - enable/disable
1201          * & flow control type is controlled by ixgbe.conf
1202          */
1203         hw->fc.high_water = DEFAULT_FCRTH;
1204         hw->fc.low_water = DEFAULT_FCRTL;
1205         hw->fc.pause_time = DEFAULT_FCPAUSE;
1206         hw->fc.send_xon = B_TRUE;
1207 
1208         /*
1209          * Initialize link settings
1210          */
1211         (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212 
1213         /*
1214          * Initialize the chipset hardware
1215          */
1216         if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217                 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218                 goto init_fail;
1219         }
1220 
1221         if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222                 goto init_fail;
1223         }
1224 


2073 
2074         /*
2075          * Setup head & tail pointers
2076          */
2077         IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2078             rx_data->ring_size - 1);
2079         IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080 
2081         rx_data->rbd_next = 0;
2082         rx_data->lro_first = 0;
2083 
2084         /*
2085          * Setup the Receive Descriptor Control Register (RXDCTL)
2086          * PTHRESH=32 descriptors (half the internal cache)
2087          * HTHRESH=0 descriptors (to minimize latency on fetch)
2088          * WTHRESH defaults to 1 (writeback each descriptor)
2089          */
2090         reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091         reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092 
2093         /* Not a valid value for 82599 */
2094         if (hw->mac.type < ixgbe_mac_82599EB) {
2095                 reg_val |= 0x0020;      /* pthresh */
2096         }
2097         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098 
2099         if (hw->mac.type == ixgbe_mac_82599EB) {
2100                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103         }
2104 
2105         /*
2106          * Setup the Split and Replication Receive Control Register.
2107          * Set the rx buffer size and the advanced descriptor type.
2108          */
2109         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111         reg_val |= IXGBE_SRRCTL_DROP_EN;
2112         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 }
2114 
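A note on the BSIZEPKT field set just above: SRRCTL expresses the rx buffer size in 1 KB units. A hypothetical restatement, assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT value of 10:

#include <stdint.h>

/* rx buffer size in bytes -> SRRCTL.BSIZEPKT field (1 KB units) */
static uint32_t
srrctl_bsizepkt(uint32_t rx_buf_size)
{
        return (rx_buf_size >> 10);     /* e.g. 2048 -> 2, 4096 -> 4 */
}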
2115 static void
2116 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 {
2118         ixgbe_rx_ring_t *rx_ring;
2119         struct ixgbe_hw *hw = &ixgbe->hw;


2316                  */
2317                 tx_ring->tbd_head_wb = (uint32_t *)
2318                     ((uintptr_t)tx_ring->tbd_area.address + size);
2319                 *tx_ring->tbd_head_wb = 0;
2320 
2321                 buf_low = (uint32_t)
2322                     (tx_ring->tbd_area.dma_address + size);
2323                 buf_high = (uint32_t)
2324                     ((tx_ring->tbd_area.dma_address + size) >> 32);
2325 
2326                 /* Set the head write-back enable bit */
2327                 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328 
2329                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331 
2332                 /*
2333                  * Turn off relaxed ordering for head write back or it will
2334                  * cause problems with the tx recycling
2335                  */
2336                 reg_val = IXGBE_READ_REG(hw,
2337                     IXGBE_DCA_TXCTRL(tx_ring->index));
2338                 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339                 IXGBE_WRITE_REG(hw,
2340                     IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2341         } else {
2342                 tx_ring->tbd_head_wb = NULL;
2343         }
2344 
2345         tx_ring->tbd_head = 0;
2346         tx_ring->tbd_tail = 0;
2347         tx_ring->tbd_free = tx_ring->ring_size;
2348 
2349         if (ixgbe->tx_ring_init == B_TRUE) {
2350                 tx_ring->tcb_head = 0;
2351                 tx_ring->tcb_tail = 0;
2352                 tx_ring->tcb_free = tx_ring->free_list_size;
2353         }
2354 
2355         /*
2356          * Initialize the s/w context structure
2357          */
2358         bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 }
2360 
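The head write-back setup above programs a 64-bit DMA address as two 32-bit halves, with the enable flag carried in the low word. A minimal illustrative helper (the 0x1 enable bit is an assumption here; the address must be at least 4-byte aligned so the flag does not collide with address bits):

#include <stdint.h>

#define HEAD_WB_ENABLE  0x1     /* assumed IXGBE_TDWBAL_HEAD_WB_ENABLE */

static void
head_wb_regs(uint64_t dma_addr, uint32_t *buf_low, uint32_t *buf_high)
{
        *buf_low = (uint32_t)dma_addr | HEAD_WB_ENABLE; /* TDWBAL */
        *buf_high = (uint32_t)(dma_addr >> 32);         /* TDWBAH */
}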
2361 static void


2369 
2370         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371                 tx_ring = &ixgbe->tx_rings[i];
2372                 ixgbe_setup_tx_ring(tx_ring);
2373         }
2374 
2375         /*
2376          * Setup the per-ring statistics mapping.
2377          */
2378         ring_mapping = 0;
2379         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380                 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381                 if ((i & 0x3) == 0x3) {
2382                         switch (hw->mac.type) {
2383                         case ixgbe_mac_82598EB:
2384                                 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385                                     ring_mapping);
2386                                 break;
2387 
2388                         case ixgbe_mac_82599EB:
2389                                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390                                     ring_mapping);
2391                                 break;
2392 
2393                         default:
2394                                 break;
2395                         }
2396 
2397                         ring_mapping = 0;
2398                 }
2399         }
2400         if (i & 0x3) {
2401                 switch (hw->mac.type) {
2402                 case ixgbe_mac_82598EB:
2403                         IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404                         break;
2405 
2406                 case ixgbe_mac_82599EB:
2407                         IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408                         break;
2409 
2410                 default:
2411                         break;
2412                 }
2413         }
2414 
2415         /*
2416          * Enable CRC appending and TX padding (for short tx frames)
2417          */
2418         reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419         reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421 
2422         /*
2423          * enable DMA for 82599 parts
2424          */
2425         if (hw->mac.type == ixgbe_mac_82599EB) {
 2426                 /* DMATXCTL.TE must be set after all Tx config is complete */
2427                 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428                 reg_val |= IXGBE_DMATXCTL_TE;
2429                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2430         }
2431 
2432         /*
 2433          * Enable tx queues.
 2434          * For 82599, this must be done after DMATXCTL.TE is set.
2435          */
2436         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437                 tx_ring = &ixgbe->tx_rings[i];
2438                 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439                 reg_val |= IXGBE_TXDCTL_ENABLE;
2440                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441         }
2442 }
2443 
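The statistics-mapping loop above packs four 8-bit queue indices into each 32-bit TQSMR/TQSM register: ring i lands in byte (i & 0x3) of register (i >> 2), and the trailing write flushes any partially filled register. A standalone illustration for a hypothetical six-ring configuration:

#include <stdio.h>

int
main(void)
{
        unsigned int i, ring_mapping = 0;

        for (i = 0; i < 6; i++) {
                ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
                if ((i & 0x3) == 0x3) {
                        (void) printf("TQSM[%u] = 0x%08x\n",
                            i >> 2, ring_mapping);
                        ring_mapping = 0;
                }
        }
        if (i & 0x3)    /* flush the partial register for rings 4..5 */
                (void) printf("TQSM[%u] = 0x%08x\n", i >> 2, ring_mapping);

        return (0);
}

This prints TQSM[0] = 0x03020100 and TQSM[1] = 0x00000504.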
2444 /*
2445  * ixgbe_setup_rss - Setup receive-side scaling feature.
2446  */
2447 static void
2448 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 {


2509 {
2510         struct ixgbe_hw *hw = &ixgbe->hw;
2511         uint32_t vmdctl, i, vtctl;
2512 
2513         /*
2514          * Setup the VMDq Control register, enable VMDq based on
2515          * packet destination MAC address:
2516          */
2517         switch (hw->mac.type) {
2518         case ixgbe_mac_82598EB:
2519                 /*
2520                  * VMDq Enable = 1;
2521                  * VMDq Filter = 0; MAC filtering
2522                  * Default VMDq output index = 0;
2523                  */
2524                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526                 break;
2527 
2528         case ixgbe_mac_82599EB:
2529                 /*
2530                  * Enable VMDq-only.
2531                  */
2532                 vmdctl = IXGBE_MRQC_VMDQEN;
2533                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534 
2535                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538                 }
2539 
2540                 /*
2541                  * Enable Virtualization and Replication.
2542                  */
2543                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545 
2546                 /*
2547                  * Enable receiving packets to all VFs
2548                  */


2602                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2607                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611 
2612                 /*
2613                  * Enable and Setup VMDq
2614                  * VMDq Filter = 0; MAC filtering
2615                  * Default VMDq output index = 0;
2616                  */
2617                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619                 break;
2620 
2621         case ixgbe_mac_82599EB:
2622                 /*
2623                  * Enable RSS & Setup RSS Hash functions
2624                  */
2625                 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2631                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634 
2635                 /*
2636                  * Enable VMDq+RSS.
2637                  */
2638                 if (ixgbe->num_rx_groups > 32)  {
2639                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640                 } else {
2641                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;


2647                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649                 }
2650                 break;
2651 
2652         default:
2653                 break;
2654 
2655         }
2656 
2657         /*
2658          * Disable Packet Checksum to enable RSS for multiple receive queues.
2659          * It is an adapter hardware limitation that Packet Checksum is
2660          * mutually exclusive with RSS.
2661          */
2662         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663         rxcsum |= IXGBE_RXCSUM_PCSD;
2664         rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666 
2667         if (hw->mac.type == ixgbe_mac_82599EB) {
2668                 /*
2669                  * Enable Virtualization and Replication.
2670                  */
2671                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673 
2674                 /*
2675                  * Enable receiving packets to all VFs
2676                  */
2677                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679         }
2680 }
2681 
2682 /*
2683  * ixgbe_init_unicst - Initialize the unicast addresses.
2684  */
2685 static void
2686 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 {


2822  * and save them in the hardware registers.
2823  */
2824 static void
2825 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 {
2827         uint8_t *mc_addr_list;
2828         uint32_t mc_addr_count;
2829         struct ixgbe_hw *hw = &ixgbe->hw;
2830 
2831         ASSERT(mutex_owned(&ixgbe->gen_lock));
2832 
2833         ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834 
2835         mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836         mc_addr_count = ixgbe->mcast_count;
2837 
2838         /*
2839          * Update the multicast addresses to the MTA registers
2840          */
2841         (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842             ixgbe_mc_table_itr);
2843 }
2844 
2845 /*
2846  * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847  *
2848  * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849  * Different chipsets may have different allowed configuration of vmdq and rss.
2850  */
2851 static void
2852 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 {
2854         struct ixgbe_hw *hw = &ixgbe->hw;
2855         uint32_t ring_per_group;
2856 
2857         switch (hw->mac.type) {
2858         case ixgbe_mac_82598EB:
2859                 /*
2860                  * 82598 supports the following combination:
2861                  * vmdq no. x rss no.
2862                  * [5..16]  x 1
2863                  * [1..4]   x [1..16]
 2864                  * However, 8 rss queues per pool (vmdq) is sufficient for
2865                  * most cases.
2866                  */
2867                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868                 if (ixgbe->num_rx_groups > 4) {
2869                         ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870                 } else {
2871                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872                             min(8, ring_per_group);
2873                 }
2874 
2875                 break;
2876 
2877         case ixgbe_mac_82599EB:
2878                 /*
2879                  * 82599 supports the following combination:
2880                  * vmdq no. x rss no.
2881                  * [33..64] x [1..2]
2882                  * [2..32]  x [1..4]
2883                  * 1 x [1..16]
 2884                  * However, 8 rss queues per pool (vmdq) is sufficient for
2885                  * most cases.
2886                  */
2887                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888                 if (ixgbe->num_rx_groups == 1) {
2889                         ixgbe->num_rx_rings = min(8, ring_per_group);
2890                 } else if (ixgbe->num_rx_groups <= 32) {
2891                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892                             min(4, ring_per_group);
2893                 } else if (ixgbe->num_rx_groups <= 64) {
2894                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895                             min(2, ring_per_group);
2896                 }
2897                 break;
2898 
2899         default:
2900                 break;
2901         }
2902 
2903         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904 
2905         if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {


3028          * 1 = force interrupt type MSI-X
3029          * 2 = force interrupt type MSI
3030          * 3 = force interrupt type Legacy
3031          */
3032         ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033             IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034 
3035         ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036             0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037         ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038             0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039         ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040             0, 1, DEFAULT_LSO_ENABLE);
3041         ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042             0, 1, DEFAULT_LRO_ENABLE);
3043         ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044             0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045         ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046             PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047 
3048         /* Head Write Back not recommended for 82599 */
3049         if (hw->mac.type >= ixgbe_mac_82599EB) {
3050                 ixgbe->tx_head_wb_enable = B_FALSE;
3051         }
3052 
3053         /*
3054          * ixgbe LSO needs the tx h/w checksum support.
3055          * LSO will be disabled if tx h/w checksum is not
3056          * enabled.
3057          */
3058         if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059                 ixgbe->lso_enable = B_FALSE;
3060         }
3061 
3062         /*
3063          * ixgbe LRO needs the rx h/w checksum support.
3064          * LRO will be disabled if rx h/w checksum is not
3065          * enabled.
3066          */
3067         if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068                 ixgbe->lro_enable = B_FALSE;
3069         }
3070 
3071         /*
 3072          * ixgbe LRO is currently supported only on the 82599
3073          */
3074         if (hw->mac.type != ixgbe_mac_82599EB) {
3075                 ixgbe->lro_enable = B_FALSE;
3076         }
3077         ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078             MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079             DEFAULT_TX_COPY_THRESHOLD);
3080         ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081             PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082             MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083         ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084             PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085             MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086         ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087             PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088             MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089 
3090         ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091             MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092             DEFAULT_RX_COPY_THRESHOLD);
3093         ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094             MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095             DEFAULT_RX_LIMIT_PER_INTR);
3096 
3097         ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098             ixgbe->capab->min_intr_throttle,
3099             ixgbe->capab->max_intr_throttle,
3100             ixgbe->capab->def_intr_throttle);
3101         /*
 3102          * 82599 requires that the interrupt throttling rate
 3103          * be a multiple of 8. This is enforced by the register
 3104          * definition.
3105          */
3106         if (hw->mac.type == ixgbe_mac_82599EB)
3107                 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 }
3109 
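The 0xFF8 mask above clears bits 2:0, rounding the configured throttle rate down to the nearest multiple of 8. Two illustrative cases:

#include <assert.h>

int
main(void)
{
        assert((200 & 0xFF8) == 200);   /* already a multiple of 8 */
        assert((509 & 0xFF8) == 504);   /* 0x1FD rounds down to 0x1F8 */

        return (0);
}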
3110 static void
3111 ixgbe_init_params(ixgbe_t *ixgbe)
3112 {
3113         ixgbe->param_en_10000fdx_cap = 1;
3114         ixgbe->param_en_1000fdx_cap = 1;
3115         ixgbe->param_en_100fdx_cap = 1;
3116         ixgbe->param_adv_10000fdx_cap = 1;
3117         ixgbe->param_adv_1000fdx_cap = 1;
3118         ixgbe->param_adv_100fdx_cap = 1;
3119 
3120         ixgbe->param_pause_cap = 1;
3121         ixgbe->param_asym_pause_cap = 1;
3122         ixgbe->param_rem_fault = 0;
3123 
3124         ixgbe->param_adv_autoneg_cap = 1;
3125         ixgbe->param_adv_pause_cap = 1;
3126         ixgbe->param_adv_asym_pause_cap = 1;


3212 /*
3213  * ixgbe_driver_link_check - Link status processing.
3214  *
3215  * This function can be called in both kernel context and interrupt context
3216  */
3217 static void
3218 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 {
3220         struct ixgbe_hw *hw = &ixgbe->hw;
3221         ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222         boolean_t link_up = B_FALSE;
3223         boolean_t link_changed = B_FALSE;
3224 
3225         ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 
3227         (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228         if (link_up) {
3229                 ixgbe->link_check_complete = B_TRUE;
3230 
3231                 /* Link is up, enable flow control settings */
3232                 (void) ixgbe_fc_enable(hw, 0);
3233 
3234                 /*
3235                  * The Link is up, check whether it was marked as down earlier
3236                  */
3237                 if (ixgbe->link_state != LINK_STATE_UP) {
3238                         switch (speed) {
3239                         case IXGBE_LINK_SPEED_10GB_FULL:
3240                                 ixgbe->link_speed = SPEED_10GB;
3241                                 break;
3242                         case IXGBE_LINK_SPEED_1GB_FULL:
3243                                 ixgbe->link_speed = SPEED_1GB;
3244                                 break;
3245                         case IXGBE_LINK_SPEED_100_FULL:
3246                                 ixgbe->link_speed = SPEED_100;
3247                         }
3248                         ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249                         ixgbe->link_state = LINK_STATE_UP;
3250                         link_changed = B_TRUE;
3251                 }
3252         } else {


3715          */
3716         if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3717                 /* enable autoclear but not on bits 29:20 */
3718                 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3719 
3720                 /* general purpose interrupt enable */
3721                 gpie |= (IXGBE_GPIE_MSIX_MODE
3722                     | IXGBE_GPIE_PBA_SUPPORT
3723                     | IXGBE_GPIE_OCD
3724                     | IXGBE_GPIE_EIAME);
3725         /*
3726          * non-msi-x mode
3727          */
3728         } else {
3729 
3730                 /* disable autoclear, leave gpie at default */
3731                 eiac = 0;
3732 
3733                 /*
3734                  * General purpose interrupt enable.
3735                  * For 82599, extended interrupt automask enable
3736                  * only in MSI or MSI-X mode
3737                  */
3738                 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739                     (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740                         gpie |= IXGBE_GPIE_EIAME;
3741                 }
3742         }
3743 
3744         /* Enable specific "other" interrupt types */
3745         switch (hw->mac.type) {
3746         case ixgbe_mac_82598EB:
3747                 gpie |= ixgbe->capab->other_gpie;
3748                 break;
3749 
3750         case ixgbe_mac_82599EB:
3751                 gpie |= ixgbe->capab->other_gpie;
3752 
3753                 /* Enable RSC Delay 8us when LRO enabled  */
3754                 if (ixgbe->lro_enable) {
3755                         gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756                 }
3757                 break;
3758 
3759         default:
3760                 break;
3761         }
3762 
3763         /* write to interrupt control registers */
3764         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765         IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766         IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768         IXGBE_WRITE_FLUSH(hw);
3769 }
3770 


3924                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925                     &atlas);
3926                 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928                     atlas);
3929 
3930                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931                     &atlas);
3932                 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934                     atlas);
3935 
3936                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937                     &atlas);
3938                 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940                     atlas);
3941                 break;
3942 
3943         case ixgbe_mac_82599EB:
3944                 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945                 reg |= (IXGBE_AUTOC_FLU |
3946                     IXGBE_AUTOC_10G_KX4);
3947                 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948 
3949                 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950                     B_FALSE, B_TRUE);
3951                 break;
3952 
3953         default:
3954                 break;
3955         }
3956 }
3957 
3958 #pragma inline(ixgbe_intr_rx_work)
3959 /*
3960  * ixgbe_intr_rx_work - RX processing of ISR.
3961  */
3962 static void
3963 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)


4142                          * Recycle the tx descriptors
4143                          */
4144                         tx_ring = &ixgbe->tx_rings[0];
4145                         tx_ring->tx_recycle(tx_ring);
4146 
4147                         /*
4148                          * Schedule the re-transmit
4149                          */
4150                         tx_reschedule = (tx_ring->reschedule &&
4151                             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152                 }
4153 
4154                 /* any interrupt type other than tx/rx */
4155                 if (eicr & ixgbe->capab->other_intr) {
4156                         switch (hw->mac.type) {
4157                         case ixgbe_mac_82598EB:
4158                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159                                 break;
4160 
4161                         case ixgbe_mac_82599EB:
4162                                 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164                                 break;
4165 
4166                         default:
4167                                 break;
4168                         }
4169                         ixgbe_intr_other_work(ixgbe, eicr);
4170                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171                 }
4172 
4173                 mutex_exit(&ixgbe->gen_lock);
4174 
4175                 result = DDI_INTR_CLAIMED;
4176         } else {
4177                 mutex_exit(&ixgbe->gen_lock);
4178 
4179                 /*
4180                  * No interrupt cause bits set: don't claim this interrupt.
4181                  */


4235         if (eicr & 0x1) {
4236                 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237         }
4238 
4239         /*
4240          * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241          */
4242         if (eicr & 0x2) {
4243                 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244         }
4245 
4246         /* any interrupt type other than tx/rx */
4247         if (eicr & ixgbe->capab->other_intr) {
4248                 mutex_enter(&ixgbe->gen_lock);
4249                 switch (hw->mac.type) {
4250                 case ixgbe_mac_82598EB:
4251                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252                         break;
4253 
4254                 case ixgbe_mac_82599EB:
4255                         ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257                         break;
4258 
4259                 default:
4260                         break;
4261                 }
4262                 ixgbe_intr_other_work(ixgbe, eicr);
4263                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264                 mutex_exit(&ixgbe->gen_lock);
4265         }
4266 
4267         /* re-enable the interrupts which were automasked */
4268         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269 
4270         return (DDI_INTR_CLAIMED);
4271 }
4272 
4273 /*
4274  * ixgbe_intr_msix - Interrupt handler for MSI-X.


4314                 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315                     DDI_FM_OK) {
4316                         ddi_fm_service_impact(ixgbe->dip,
4317                             DDI_SERVICE_DEGRADED);
4318                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319                         return (DDI_INTR_CLAIMED);
4320                 }
4321 
4322                 /*
4323                  * Check "other" cause bits: any interrupt type other than tx/rx
4324                  */
4325                 if (eicr & ixgbe->capab->other_intr) {
4326                         mutex_enter(&ixgbe->gen_lock);
4327                         switch (hw->mac.type) {
4328                         case ixgbe_mac_82598EB:
4329                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330                                 ixgbe_intr_other_work(ixgbe, eicr);
4331                                 break;
4332 
4333                         case ixgbe_mac_82599EB:
4334                                 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335                                 ixgbe_intr_other_work(ixgbe, eicr);
4336                                 break;
4337 
4338                         default:
4339                                 break;
4340                         }
4341                         mutex_exit(&ixgbe->gen_lock);
4342                 }
4343 
4344                 /* re-enable the interrupts which were automasked */
4345                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346         }
4347 
4348         return (DDI_INTR_CLAIMED);
4349 }
4350 
4351 /*
4352  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353  *


4714 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715     int8_t cause)
4716 {
4717         struct ixgbe_hw *hw = &ixgbe->hw;
4718         u32 ivar, index;
4719 
4720         switch (hw->mac.type) {
4721         case ixgbe_mac_82598EB:
4722                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723                 if (cause == -1) {
4724                         cause = 0;
4725                 }
4726                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728                 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729                 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731                 break;
4732 
4733         case ixgbe_mac_82599EB:
4734                 if (cause == -1) {
4735                         /* other causes */
4736                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737                         index = (intr_alloc_entry & 1) * 8;
4738                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739                         ivar &= ~(0xFF << index);
4740                         ivar |= (msix_vector << index);
4741                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742                 } else {
4743                         /* tx or rx causes */
4744                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746                         ivar = IXGBE_READ_REG(hw,
4747                             IXGBE_IVAR(intr_alloc_entry >> 1));
4748                         ivar &= ~(0xFF << index);
4749                         ivar |= (msix_vector << index);
4750                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751                             ivar);
4752                 }
4753                 break;
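In the 82599 branch above, each IVAR register covers two queue entries with an 8-bit allocation field per (entry, cause) pair: rx (cause 0) occupies bits 7:0 and 23:16, tx (cause 1) bits 15:8 and 31:24. A hypothetical helper restating the addressing for the tx/rx case (cause >= 0):

#include <stdint.h>

/* map (entry, cause) to the 82599 IVAR register index and bit shift */
static void
ivar_82599_slot(uint16_t entry, int8_t cause,
    uint32_t *reg_index, uint32_t *bit_shift)
{
        *reg_index = entry >> 1;                        /* IVAR(reg_index) */
        *bit_shift = (16 * (entry & 1)) + (8 * cause);  /* field offset */
}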


4767  */
4768 static void
4769 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 {
4771         struct ixgbe_hw *hw = &ixgbe->hw;
4772         u32 ivar, index;
4773 
4774         switch (hw->mac.type) {
4775         case ixgbe_mac_82598EB:
4776                 if (cause == -1) {
4777                         cause = 0;
4778                 }
4779                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781                 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782                     (intr_alloc_entry & 0x3)));
4783                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784                 break;
4785 
4786         case ixgbe_mac_82599EB:
4787                 if (cause == -1) {
4788                         /* other causes */
4789                         index = (intr_alloc_entry & 1) * 8;
4790                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793                 } else {
4794                         /* tx or rx causes */
4795                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796                         ivar = IXGBE_READ_REG(hw,
4797                             IXGBE_IVAR(intr_alloc_entry >> 1));
4798                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800                             ivar);
4801                 }
4802                 break;
4803 
4804         default:
4805                 break;
4806         }


4816  */
4817 static void
4818 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 {
4820         struct ixgbe_hw *hw = &ixgbe->hw;
4821         u32 ivar, index;
4822 
4823         switch (hw->mac.type) {
4824         case ixgbe_mac_82598EB:
4825                 if (cause == -1) {
4826                         cause = 0;
4827                 }
4828                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830                 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4831                     (intr_alloc_entry & 0x3)));
4832                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833                 break;
4834 
4835         case ixgbe_mac_82599EB:
4836                 if (cause == -1) {
4837                         /* other causes */
4838                         index = (intr_alloc_entry & 1) * 8;
4839                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842                 } else {
4843                         /* tx or rx causes */
4844                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845                         ivar = IXGBE_READ_REG(hw,
4846                             IXGBE_IVAR(intr_alloc_entry >> 1));
4847                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849                             ivar);
4850                 }
4851                 break;
4852 
4853         default:
4854                 break;
4855         }


4858 /*
 4859  * Convert the driver-maintained rx ring index to the rx ring index
 4860  * used by the h/w.
4861  */
4862 static uint32_t
4863 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 {
4865 
4866         struct ixgbe_hw *hw = &ixgbe->hw;
4867         uint32_t rx_ring_per_group, hw_rx_index;
4868 
4869         if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870             ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871                 return (sw_rx_index);
4872         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873                 switch (hw->mac.type) {
4874                 case ixgbe_mac_82598EB:
4875                         return (sw_rx_index);
4876 
4877                 case ixgbe_mac_82599EB:
4878                         return (sw_rx_index * 2);
4879 
4880                 default:
4881                         break;
4882                 }
4883         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884                 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885 
4886                 switch (hw->mac.type) {
4887                 case ixgbe_mac_82598EB:
4888                         hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889                             16 + (sw_rx_index % rx_ring_per_group);
4890                         return (hw_rx_index);
4891 
4892                 case ixgbe_mac_82599EB:
4893                         if (ixgbe->num_rx_groups > 32) {
4894                                 hw_rx_index = (sw_rx_index /
4895                                     rx_ring_per_group) * 2 +
4896                                     (sw_rx_index % rx_ring_per_group);
4897                         } else {
4898                                 hw_rx_index = (sw_rx_index /
4899                                     rx_ring_per_group) * 4 +
4900                                     (sw_rx_index % rx_ring_per_group);
4901                         }
4902                         return (hw_rx_index);
4903 
4904                 default:
4905                         break;
4906                 }
4907         }
4908 
4909         /*
4910          * Should never reach. Just to make compiler happy.
4911          */
4912         return (sw_rx_index);
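A worked example for the 82599 VMDQ+RSS branch above, assuming 16 rx groups of 2 rings each (at most 32 groups, so each pool spans 4 hardware rings): software ring 5 is ring 1 of group 2, giving hw_rx_index = (5 / 2) * 4 + (5 % 2) = 9. Restated as a hypothetical helper:

#include <stdint.h>

/* 82599 with <= 32 VMDQ groups: each pool owns 4 hardware rings */
static uint32_t
hw_rx_index_82599(uint32_t sw_rx_index, uint32_t rx_ring_per_group)
{
        return ((sw_rx_index / rx_ring_per_group) * 4 +
            (sw_rx_index % rx_ring_per_group));
}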


4977  */
4978 static void
4979 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 {
4981         struct ixgbe_hw *hw = &ixgbe->hw;
4982         ixgbe_intr_vector_t *vect;      /* vector bitmap */
4983         int r_idx;      /* ring index */
4984         int v_idx;      /* vector index */
4985         uint32_t hw_index;
4986 
4987         /*
4988          * Clear any previous entries
4989          */
4990         switch (hw->mac.type) {
4991         case ixgbe_mac_82598EB:
4992                 for (v_idx = 0; v_idx < 25; v_idx++)
4993                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994                 break;
4995 
4996         case ixgbe_mac_82599EB:
4997                 for (v_idx = 0; v_idx < 64; v_idx++)
4998                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999                 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000                 break;
5001 
5002         default:
5003                 break;
5004         }
5005 
5006         /*
5007          * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008          * tx rings[0] will use RTxQ[1].
5009          */
5010         if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011                 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012                 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013                 return;
5014         }
5015 
5016         /*




   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  28  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  29  * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
  30  */
  31 
  32 #include "ixgbe_sw.h"
  33 
  34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
  35 static char ixgbe_version[] = "ixgbe 1.1.7";
  36 
  37 /*
  38  * Local function prototypes
  39  */
  40 static int ixgbe_register_mac(ixgbe_t *);
  41 static int ixgbe_identify_hardware(ixgbe_t *);
  42 static int ixgbe_regs_map(ixgbe_t *);
  43 static void ixgbe_init_properties(ixgbe_t *);
  44 static int ixgbe_init_driver_settings(ixgbe_t *);
  45 static void ixgbe_init_locks(ixgbe_t *);
  46 static void ixgbe_destroy_locks(ixgbe_t *);
  47 static int ixgbe_init(ixgbe_t *);
  48 static int ixgbe_chip_start(ixgbe_t *);
  49 static void ixgbe_chip_stop(ixgbe_t *);


 276         0xFF8,          /* maximum interrupt throttle rate */
 277         0,              /* minimum interrupt throttle rate */
 278         200,            /* default interrupt throttle rate */
 279         64,             /* maximum total msix vectors */
 280         16,             /* maximum number of ring vectors */
 281         2,              /* maximum number of other vectors */
 282         (IXGBE_EICR_LSC
 283         | IXGBE_EICR_GPI_SDP1
 284         | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
 285 
 286         (IXGBE_SDP1_GPIEN
 287         | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
 288 
 289         (IXGBE_FLAG_DCA_CAPABLE
 290         | IXGBE_FLAG_RSS_CAPABLE
 291         | IXGBE_FLAG_VMDQ_CAPABLE
 292         | IXGBE_FLAG_RSC_CAPABLE
 293         | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
 294 };
 295 
 296 static adapter_info_t ixgbe_X540_cap = {
 297         128,            /* maximum number of rx queues */
 298         1,              /* minimum number of rx queues */
 299         128,            /* default number of rx queues */
 300         64,             /* maximum number of rx groups */
 301         1,              /* minimum number of rx groups */
 302         1,              /* default number of rx groups */
 303         128,            /* maximum number of tx queues */
 304         1,              /* minimum number of tx queues */
 305         8,              /* default number of tx queues */
 306         15500,          /* maximum MTU size */
 307         0xFF8,          /* maximum interrupt throttle rate */
 308         0,              /* minimum interrupt throttle rate */
 309         200,            /* default interrupt throttle rate */
 310         64,             /* maximum total msix vectors */
 311         16,             /* maximum number of ring vectors */
 312         2,              /* maximum number of other vectors */
 313         (IXGBE_EICR_LSC
 314         | IXGBE_EICR_GPI_SDP1
 315         | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
 316 
 317         (IXGBE_SDP1_GPIEN
 318         | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
 319 
 320         (IXGBE_FLAG_DCA_CAPABLE
 321         | IXGBE_FLAG_RSS_CAPABLE
 322         | IXGBE_FLAG_VMDQ_CAPABLE
 323         | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
 324 };
 325 
 326 /*
 327  * Module Initialization Functions.
 328  */
 329 
 330 int
 331 _init(void)
 332 {
 333         int status;
 334 
 335         mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
 336 
 337         status = mod_install(&ixgbe_modlinkage);
 338 
 339         if (status != DDI_SUCCESS) {
 340                 mac_fini_ops(&ixgbe_dev_ops);
 341         }
 342 
 343         return (status);
 344 }
 345 


 882                 ixgbe->capab = &ixgbe_82598eb_cap;
 883 
 884                 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
 885                         ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
 886                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
 887                         ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
 888                 }
 889                 break;
 890 
 891         case ixgbe_mac_82599EB:
 892                 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
 893                 ixgbe->capab = &ixgbe_82599eb_cap;
 894 
 895                 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
 896                         ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
 897                         ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
 898                         ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
 899                 }
 900                 break;
 901 
 902         case ixgbe_mac_X540:
 903                 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
 904                 ixgbe->capab = &ixgbe_X540_cap;
 905                 /*
 906                  * For now, X540 is all set in its capab structure.
 907                  * As other X540 variants show up, things can change here.
 908                  */
 909                 break;
 910 
 911         default:
 912                 IXGBE_DEBUGLOG_1(ixgbe,
 913                     "adapter not supported in ixgbe_identify_hardware(): %d\n",
 914                     hw->mac.type);
 915                 return (IXGBE_FAILURE);
 916         }
 917 
 918         return (IXGBE_SUCCESS);
 919 }
 920 
 921 /*
 922  * ixgbe_regs_map - Map the device registers.
 923  *
 924  */
 925 static int
 926 ixgbe_regs_map(ixgbe_t *ixgbe)
 927 {
 928         dev_info_t *devinfo = ixgbe->dip;
 929         struct ixgbe_hw *hw = &ixgbe->hw;
 930         struct ixgbe_osdep *osdep = &ixgbe->osdep;


1224          */
1225         if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1226                 /*
1227                  * Some PCI-E parts fail the first check due to
1228                  * the link being in sleep state.  Call it again,
1229                  * if it fails a second time it's a real issue.
1230                  */
1231                 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1232                         ixgbe_error(ixgbe,
1233                             "Invalid NVM checksum. Please contact "
1234                             "the vendor to update the NVM.");
1235                         ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1236                         goto init_fail;
1237                 }
1238         }
1239 
1240         /*
1241          * Setup default flow control thresholds - enable/disable
1242          * & flow control type is controlled by ixgbe.conf
1243          */
1244         hw->fc.high_water[0] = DEFAULT_FCRTH;
1245         hw->fc.low_water[0] = DEFAULT_FCRTL;
1246         hw->fc.pause_time = DEFAULT_FCPAUSE;
1247         hw->fc.send_xon = B_TRUE;
1248 
1249         /*
1250          * Initialize link settings
1251          */
1252         (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1253 
1254         /*
1255          * Initialize the chipset hardware
1256          */
1257         if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1258                 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1259                 goto init_fail;
1260         }
1261 
1262         if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1263                 goto init_fail;
1264         }
1265 


2114 
2115         /*
2116          * Setup head & tail pointers
2117          */
2118         IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2119             rx_data->ring_size - 1);
2120         IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2121 
2122         rx_data->rbd_next = 0;
2123         rx_data->lro_first = 0;
2124 
2125         /*
2126          * Setup the Receive Descriptor Control Register (RXDCTL)
2127          * PTHRESH=32 descriptors (half the internal cache)
2128          * HTHRESH=0 descriptors (to minimize latency on fetch)
2129          * WTHRESH defaults to 1 (writeback each descriptor)
2130          */
2131         reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2132         reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2133 
2134         /* Not a valid value for 82599 or X540 */
2135         if (hw->mac.type == ixgbe_mac_82598EB) {
2136                 reg_val |= 0x0020;      /* pthresh */
2137         }
2138         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2139 
2140         if (hw->mac.type == ixgbe_mac_82599EB ||
2141             hw->mac.type == ixgbe_mac_X540) {
2142                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2143                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2144                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2145         }
2146 
2147         /*
2148          * Setup the Split and Replication Receive Control Register.
2149          * Set the rx buffer size and the advanced descriptor type.
2150          */
2151         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2152             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2153         reg_val |= IXGBE_SRRCTL_DROP_EN;
2154         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2155 }
2156 
2157 static void
2158 ixgbe_setup_rx(ixgbe_t *ixgbe)
2159 {
2160         ixgbe_rx_ring_t *rx_ring;
2161         struct ixgbe_hw *hw = &ixgbe->hw;


2358                  */
2359                 tx_ring->tbd_head_wb = (uint32_t *)
2360                     ((uintptr_t)tx_ring->tbd_area.address + size);
2361                 *tx_ring->tbd_head_wb = 0;
2362 
2363                 buf_low = (uint32_t)
2364                     (tx_ring->tbd_area.dma_address + size);
2365                 buf_high = (uint32_t)
2366                     ((tx_ring->tbd_area.dma_address + size) >> 32);
2367 
2368                 /* Set the head write-back enable bit */
2369                 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2370 
2371                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2372                 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2373 
2374                 /*
2375                  * Turn off relaxed ordering for head write back or it will
2376                  * cause problems with the tx recycling
2377                  */
2378 
2379                 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2380                     IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2381                     IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2382                 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2383                 if (hw->mac.type == ixgbe_mac_82598EB) {
2384                         IXGBE_WRITE_REG(hw,
2385                             IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2386                 } else {
2387                         IXGBE_WRITE_REG(hw,
2388                             IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2389                 }
2390         } else {
2391                 tx_ring->tbd_head_wb = NULL;
2392         }
2393 
2394         tx_ring->tbd_head = 0;
2395         tx_ring->tbd_tail = 0;
2396         tx_ring->tbd_free = tx_ring->ring_size;
2397 
2398         if (ixgbe->tx_ring_init == B_TRUE) {
2399                 tx_ring->tcb_head = 0;
2400                 tx_ring->tcb_tail = 0;
2401                 tx_ring->tcb_free = tx_ring->free_list_size;
2402         }
2403 
2404         /*
2405          * Initialize the s/w context structure
2406          */
2407         bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2408 }
2409 
2410 static void


2418 
2419         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2420                 tx_ring = &ixgbe->tx_rings[i];
2421                 ixgbe_setup_tx_ring(tx_ring);
2422         }
2423 
2424         /*
2425          * Setup the per-ring statistics mapping.
2426          */
2427         ring_mapping = 0;
2428         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2429                 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2430                 if ((i & 0x3) == 0x3) {
2431                         switch (hw->mac.type) {
2432                         case ixgbe_mac_82598EB:
2433                                 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2434                                     ring_mapping);
2435                                 break;
2436 
2437                         case ixgbe_mac_82599EB:
2438                         case ixgbe_mac_X540:
2439                                 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2440                                     ring_mapping);
2441                                 break;
2442 
2443                         default:
2444                                 break;
2445                         }
2446 
2447                         ring_mapping = 0;
2448                 }
2449         }
2450         if (i & 0x3) {
2451                 switch (hw->mac.type) {
2452                 case ixgbe_mac_82598EB:
2453                         IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2454                         break;
2455 
2456                 case ixgbe_mac_82599EB:
2457                 case ixgbe_mac_X540:
2458                         IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2459                         break;
2460 
2461                 default:
2462                         break;
2463                 }
2464         }
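        /*
         * Reviewer note (illustration, not in the original source): each
         * TQSMR/TQSM register packs four 8-bit ring-to-stats mappings.
         * With 6 tx rings, rings 0-3 are written to register 0 as
         * 0x03020100 inside the loop, and the trailing write above puts
         * rings 4-5 into register 1 as 0x00000504.
         */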
2465 
2466         /*
2467          * Enable CRC appending and TX padding (for short tx frames)
2468          */
2469         reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2470         reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2471         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2472 
2473         /*
2474          * Enable DMA for 82599 and X540 parts.
2475          */
2476         if (hw->mac.type == ixgbe_mac_82599EB ||
2477             hw->mac.type == ixgbe_mac_X540) {
2478                 /* DMATXCTL.TE must be set after all Tx config is complete */
2479                 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2480                 reg_val |= IXGBE_DMATXCTL_TE;
2481                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2482 
2483                 /* Disable arbiter to set MTQC */
2484                 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2485                 reg_val |= IXGBE_RTTDCS_ARBDIS;
2486                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2487                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2488                 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2489                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2490         }
2491 
2492         /*
2493          * Enable tx queues.
2494          * For 82599 and X540, this must be done after DMATXCTL.TE is set.
2495          */
2496         for (i = 0; i < ixgbe->num_tx_rings; i++) {
2497                 tx_ring = &ixgbe->tx_rings[i];
2498                 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2499                 reg_val |= IXGBE_TXDCTL_ENABLE;
2500                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2501         }
2502 }
2503 
2504 /*
2505  * ixgbe_setup_rss - Setup receive-side scaling feature.
2506  */
2507 static void
2508 ixgbe_setup_rss(ixgbe_t *ixgbe)
2509 {


2569 {
2570         struct ixgbe_hw *hw = &ixgbe->hw;
2571         uint32_t vmdctl, i, vtctl;
2572 
2573         /*
2574          * Setup the VMDq Control register; enable VMDq based on
2575          * the packet destination MAC address.
2576          */
2577         switch (hw->mac.type) {
2578         case ixgbe_mac_82598EB:
2579                 /*
2580                  * VMDq Enable = 1;
2581                  * VMDq Filter = 0; MAC filtering
2582                  * Default VMDq output index = 0;
2583                  */
2584                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2585                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2586                 break;
2587 
2588         case ixgbe_mac_82599EB:
2589         case ixgbe_mac_X540:
2590                 /*
2591                  * Enable VMDq-only.
2592                  */
2593                 vmdctl = IXGBE_MRQC_VMDQEN;
2594                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2595 
2596                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2597                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2598                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2599                 }
2600 
2601                 /*
2602                  * Enable Virtualization and Replication.
2603                  */
2604                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2605                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2606 
2607                 /*
2608                  * Enable receiving packets to all VFs
2609                  */


2663                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2664                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2665                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2666                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2667                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2668                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2669                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2670                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2671                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2672 
2673                 /*
2674                  * Enable and Setup VMDq
2675                  * VMDq Filter = 0; MAC filtering
2676                  * Default VMDq output index = 0;
2677                  */
2678                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2679                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2680                 break;
2681 
2682         case ixgbe_mac_82599EB:
2683         case ixgbe_mac_X540:
2684                 /*
2685                  * Enable RSS & Setup RSS Hash functions
2686                  */
2687                 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2688                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2689                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2690                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2691                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2692                     IXGBE_MRQC_RSS_FIELD_IPV6 |
2693                     IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2694                     IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2695                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2696 
2697                 /*
2698                  * Enable VMDq+RSS.
2699                  */
2700                 if (ixgbe->num_rx_groups > 32)  {
2701                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2702                 } else {
2703                         mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;


2709                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2710                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2711                 }
2712                 break;
2713 
2714         default:
2715                 break;
2716 
2717         }
2718 
2719         /*
2720          * Disable Packet Checksum to enable RSS for multiple receive queues.
2721          * It is an adapter hardware limitation that Packet Checksum is
2722          * mutually exclusive with RSS.
2723          */
2724         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2725         rxcsum |= IXGBE_RXCSUM_PCSD;
2726         rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2727         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2728 
2729         if (hw->mac.type == ixgbe_mac_82599EB ||
2730             hw->mac.type == ixgbe_mac_X540) {
2731                 /*
2732                  * Enable Virtualization and Replication.
2733                  */
2734                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2735                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2736 
2737                 /*
2738                  * Enable receiving packets to all VFs
2739                  */
2740                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2741                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2742         }
2743 }
2744 
2745 /*
2746  * ixgbe_init_unicst - Initialize the unicast addresses.
2747  */
2748 static void
2749 ixgbe_init_unicst(ixgbe_t *ixgbe)
2750 {


2885  * and save them in the hardware registers.
2886  */
2887 static void
2888 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2889 {
2890         uint8_t *mc_addr_list;
2891         uint32_t mc_addr_count;
2892         struct ixgbe_hw *hw = &ixgbe->hw;
2893 
2894         ASSERT(mutex_owned(&ixgbe->gen_lock));
2895 
2896         ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2897 
2898         mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2899         mc_addr_count = ixgbe->mcast_count;
2900 
2901         /*
2902          * Update the multicast addresses to the MTA registers
2903          */
2904         (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2905             ixgbe_mc_table_itr, TRUE);
2906 }
2907 
2908 /*
2909  * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2910  *
2911  * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2912  * Different chipsets allow different combinations of vmdq and rss.
2913  */
2914 static void
2915 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2916 {
2917         struct ixgbe_hw *hw = &ixgbe->hw;
2918         uint32_t ring_per_group;
2919 
2920         switch (hw->mac.type) {
2921         case ixgbe_mac_82598EB:
2922                 /*
2923                  * 82598 supports the following combinations:
2924                  * vmdq no. x rss no.
2925                  * [5..16]  x 1
2926                  * [1..4]   x [1..16]
2927                  * However, 8 rss queues per pool (vmdq) are sufficient
2928                  * for most cases.
2929                  */
2930                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2931                 if (ixgbe->num_rx_groups > 4) {
2932                         ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2933                 } else {
2934                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2935                             min(8, ring_per_group);
2936                 }
2937 
2938                 break;
2939 
2940         case ixgbe_mac_82599EB:
2941         case ixgbe_mac_X540:
2942                 /*
2943                  * 82599 supports the following combinations:
2944                  * vmdq no. x rss no.
2945                  * [33..64] x [1..2]
2946                  * [2..32]  x [1..4]
2947                  * 1 x [1..16]
2948                  * However, 8 rss queues per pool (vmdq) are sufficient
2949                  * for most cases.
2950                  *
2951                  * For now, treat X540 like the 82599.
2952                  */
2953                 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2954                 if (ixgbe->num_rx_groups == 1) {
2955                         ixgbe->num_rx_rings = min(8, ring_per_group);
2956                 } else if (ixgbe->num_rx_groups <= 32) {
2957                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2958                             min(4, ring_per_group);
2959                 } else if (ixgbe->num_rx_groups <= 64) {
2960                         ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2961                             min(2, ring_per_group);
2962                 }
2963                 break;
2964 
2965         default:
2966                 break;
2967         }
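        /*
         * Reviewer note (illustration, not in the original source): on an
         * 82599/X540 with num_rx_groups = 2 and 16 rx rings requested,
         * ring_per_group starts at 8, and the [2..32] x [1..4] rule caps
         * the result at 2 * min(4, 8) = 8 rx rings.
         */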
2968 
2969         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2970 
2971         if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {


3094          * 1 = force interrupt type MSI-X
3095          * 2 = force interrupt type MSI
3096          * 3 = force interrupt type Legacy
3097          */
3098         ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3099             IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3100 
3101         ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3102             0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3103         ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3104             0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3105         ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3106             0, 1, DEFAULT_LSO_ENABLE);
3107         ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3108             0, 1, DEFAULT_LRO_ENABLE);
3109         ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3110             0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3111         ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3112             PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3113 
3114         /* Head Write Back not recommended for 82599 and X540 */
3115         if (hw->mac.type == ixgbe_mac_82599EB ||
3116             hw->mac.type == ixgbe_mac_X540) {
3117                 ixgbe->tx_head_wb_enable = B_FALSE;
3118         }
3119 
3120         /*
3121          * ixgbe LSO requires tx h/w checksum support.
3122          * LSO is disabled if tx h/w checksum is not
3123          * enabled.
3124          */
3125         if (ixgbe->tx_hcksum_enable == B_FALSE) {
3126                 ixgbe->lso_enable = B_FALSE;
3127         }
3128 
3129         /*
3130          * ixgbe LRO requires rx h/w checksum support.
3131          * LRO is disabled if rx h/w checksum is not
3132          * enabled.
3133          */
3134         if (ixgbe->rx_hcksum_enable == B_FALSE) {
3135                 ixgbe->lro_enable = B_FALSE;
3136         }
3137 
3138         /*
3139          * LRO is currently supported only on the 82599 and X540.
3140          */
3141         if (hw->mac.type == ixgbe_mac_82598EB) {
3142                 ixgbe->lro_enable = B_FALSE;
3143         }
3144         ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3145             MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3146             DEFAULT_TX_COPY_THRESHOLD);
3147         ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3148             PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3149             MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3150         ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3151             PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3152             MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3153         ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3154             PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3155             MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3156 
3157         ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3158             MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3159             DEFAULT_RX_COPY_THRESHOLD);
3160         ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3161             MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3162             DEFAULT_RX_LIMIT_PER_INTR);
3163 
3164         ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3165             ixgbe->capab->min_intr_throttle,
3166             ixgbe->capab->max_intr_throttle,
3167             ixgbe->capab->def_intr_throttle);
3168         /*
3169          * 82599 and X540 require that the interrupt throttling rate
3170          * be a multiple of 8. This is enforced by the register
3171          * definition.
3172          */
3173         if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
3174                 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
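        /*
         * Reviewer note (illustration, not in the original source):
         * masking with 0xFF8 clears the low three bits, so a configured
         * rate of 203 (0xCB) is silently rounded down to 200 (0xC8).
         */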
3175 }
3176 
3177 static void
3178 ixgbe_init_params(ixgbe_t *ixgbe)
3179 {
3180         ixgbe->param_en_10000fdx_cap = 1;
3181         ixgbe->param_en_1000fdx_cap = 1;
3182         ixgbe->param_en_100fdx_cap = 1;
3183         ixgbe->param_adv_10000fdx_cap = 1;
3184         ixgbe->param_adv_1000fdx_cap = 1;
3185         ixgbe->param_adv_100fdx_cap = 1;
3186 
3187         ixgbe->param_pause_cap = 1;
3188         ixgbe->param_asym_pause_cap = 1;
3189         ixgbe->param_rem_fault = 0;
3190 
3191         ixgbe->param_adv_autoneg_cap = 1;
3192         ixgbe->param_adv_pause_cap = 1;
3193         ixgbe->param_adv_asym_pause_cap = 1;


3279 /*
3280  * ixgbe_driver_link_check - Link status processing.
3281  *
3282  * This function can be called in both kernel context and interrupt context
3283  */
3284 static void
3285 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3286 {
3287         struct ixgbe_hw *hw = &ixgbe->hw;
3288         ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3289         boolean_t link_up = B_FALSE;
3290         boolean_t link_changed = B_FALSE;
3291 
3292         ASSERT(mutex_owned(&ixgbe->gen_lock));
3293 
3294         (void) ixgbe_check_link(hw, &speed, &link_up, false);
3295         if (link_up) {
3296                 ixgbe->link_check_complete = B_TRUE;
3297 
3298                 /* Link is up, enable flow control settings */
3299                 (void) ixgbe_fc_enable(hw);
3300 
3301                 /*
3302                  * The Link is up, check whether it was marked as down earlier
3303                  */
3304                 if (ixgbe->link_state != LINK_STATE_UP) {
3305                         switch (speed) {
3306                         case IXGBE_LINK_SPEED_10GB_FULL:
3307                                 ixgbe->link_speed = SPEED_10GB;
3308                                 break;
3309                         case IXGBE_LINK_SPEED_1GB_FULL:
3310                                 ixgbe->link_speed = SPEED_1GB;
3311                                 break;
3312                         case IXGBE_LINK_SPEED_100_FULL:
3313                                 ixgbe->link_speed = SPEED_100;
3314                         }
3315                         ixgbe->link_duplex = LINK_DUPLEX_FULL;
3316                         ixgbe->link_state = LINK_STATE_UP;
3317                         link_changed = B_TRUE;
3318                 }
3319         } else {


3782          */
3783         if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3784                 /* enable autoclear but not on bits 29:20 */
3785                 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3786 
3787                 /* general purpose interrupt enable */
3788                 gpie |= (IXGBE_GPIE_MSIX_MODE
3789                     | IXGBE_GPIE_PBA_SUPPORT
3790                     | IXGBE_GPIE_OCD
3791                     | IXGBE_GPIE_EIAME);
3792         /*
3793          * non-msi-x mode
3794          */
3795         } else {
3796 
3797                 /* disable autoclear, leave gpie at default */
3798                 eiac = 0;
3799 
3800                 /*
3801                  * General purpose interrupt enable.
3802                  * For 82599 or X540, extended interrupt automask is
3803                  * enabled only in MSI or MSI-X mode.
3804                  */
3805                 if ((hw->mac.type == ixgbe_mac_82598EB) ||
3806                     (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3807                         gpie |= IXGBE_GPIE_EIAME;
3808                 }
3809         }
3810 
3811         /* Enable specific "other" interrupt types */
3812         switch (hw->mac.type) {
3813         case ixgbe_mac_82598EB:
3814                 gpie |= ixgbe->capab->other_gpie;
3815                 break;
3816 
3817         case ixgbe_mac_82599EB:
3818         case ixgbe_mac_X540:
3819                 gpie |= ixgbe->capab->other_gpie;
3820 
3821                 /* Enable an RSC delay of 8us when LRO is enabled */
3822                 if (ixgbe->lro_enable) {
3823                         gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3824                 }
3825                 break;
3826 
3827         default:
3828                 break;
3829         }
3830 
3831         /* write to interrupt control registers */
3832         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3833         IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3834         IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3835         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3836         IXGBE_WRITE_FLUSH(hw);
3837 }
3838 


3992                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3993                     &atlas);
3994                 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3995                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3996                     atlas);
3997 
3998                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3999                     &atlas);
4000                 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4001                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4002                     atlas);
4003 
4004                 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4005                     &atlas);
4006                 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4007                 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4008                     atlas);
4009                 break;
4010 
4011         case ixgbe_mac_82599EB:
4012         case ixgbe_mac_X540:
4013                 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4014                 reg |= (IXGBE_AUTOC_FLU |
4015                     IXGBE_AUTOC_10G_KX4);
4016                 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4017 
4018                 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4019                     B_FALSE, B_TRUE);
4020                 break;
4021 
4022         default:
4023                 break;
4024         }
4025 }
4026 
4027 #pragma inline(ixgbe_intr_rx_work)
4028 /*
4029  * ixgbe_intr_rx_work - RX processing of ISR.
4030  */
4031 static void
4032 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)


4211                          * Recycle the tx descriptors
4212                          */
4213                         tx_ring = &ixgbe->tx_rings[0];
4214                         tx_ring->tx_recycle(tx_ring);
4215 
4216                         /*
4217                          * Schedule the re-transmit
4218                          */
4219                         tx_reschedule = (tx_ring->reschedule &&
4220                             (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4221                 }
4222 
4223                 /* any interrupt type other than tx/rx */
4224                 if (eicr & ixgbe->capab->other_intr) {
4225                         switch (hw->mac.type) {
4226                         case ixgbe_mac_82598EB:
4227                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4228                                 break;
4229 
4230                         case ixgbe_mac_82599EB:
4231                         case ixgbe_mac_X540:
4232                                 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4233                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4234                                 break;
4235 
4236                         default:
4237                                 break;
4238                         }
4239                         ixgbe_intr_other_work(ixgbe, eicr);
4240                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4241                 }
4242 
4243                 mutex_exit(&ixgbe->gen_lock);
4244 
4245                 result = DDI_INTR_CLAIMED;
4246         } else {
4247                 mutex_exit(&ixgbe->gen_lock);
4248 
4249                 /*
4250                  * No interrupt cause bits set: don't claim this interrupt.
4251                  */


4305         if (eicr & 0x1) {
4306                 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4307         }
4308 
4309         /*
4310          * For MSI interrupt, tx rings[0] will use RTxQ[1].
4311          */
4312         if (eicr & 0x2) {
4313                 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4314         }
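        /*
         * Reviewer note (not in the original source): the 0x1 and 0x2
         * tests above match the non-MSI-X mapping programmed by
         * ixgbe_setup_adapter_vector(), which assigns rx ring 0 to
         * RTxQ[0] (EICR bit 0) and tx ring 0 to RTxQ[1] (EICR bit 1).
         */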
4315 
4316         /* any interrupt type other than tx/rx */
4317         if (eicr & ixgbe->capab->other_intr) {
4318                 mutex_enter(&ixgbe->gen_lock);
4319                 switch (hw->mac.type) {
4320                 case ixgbe_mac_82598EB:
4321                         ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4322                         break;
4323 
4324                 case ixgbe_mac_82599EB:
4325                 case ixgbe_mac_X540:
4326                         ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4327                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4328                         break;
4329 
4330                 default:
4331                         break;
4332                 }
4333                 ixgbe_intr_other_work(ixgbe, eicr);
4334                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4335                 mutex_exit(&ixgbe->gen_lock);
4336         }
4337 
4338         /* re-enable the interrupts which were automasked */
4339         IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4340 
4341         return (DDI_INTR_CLAIMED);
4342 }
4343 
4344 /*
4345  * ixgbe_intr_msix - Interrupt handler for MSI-X.


4385                 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4386                     DDI_FM_OK) {
4387                         ddi_fm_service_impact(ixgbe->dip,
4388                             DDI_SERVICE_DEGRADED);
4389                         atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4390                         return (DDI_INTR_CLAIMED);
4391                 }
4392 
4393                 /*
4394                  * Check "other" cause bits: any interrupt type other than tx/rx
4395                  */
4396                 if (eicr & ixgbe->capab->other_intr) {
4397                         mutex_enter(&ixgbe->gen_lock);
4398                         switch (hw->mac.type) {
4399                         case ixgbe_mac_82598EB:
4400                                 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4401                                 ixgbe_intr_other_work(ixgbe, eicr);
4402                                 break;
4403 
4404                         case ixgbe_mac_82599EB:
4405                         case ixgbe_mac_X540:
4406                                 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4407                                 ixgbe_intr_other_work(ixgbe, eicr);
4408                                 break;
4409 
4410                         default:
4411                                 break;
4412                         }
4413                         mutex_exit(&ixgbe->gen_lock);
4414                 }
4415 
4416                 /* re-enable the interrupts which were automasked */
4417                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4418         }
4419 
4420         return (DDI_INTR_CLAIMED);
4421 }
4422 
4423 /*
4424  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4425  *


4786 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4787     int8_t cause)
4788 {
4789         struct ixgbe_hw *hw = &ixgbe->hw;
4790         u32 ivar, index;
4791 
4792         switch (hw->mac.type) {
4793         case ixgbe_mac_82598EB:
4794                 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4795                 if (cause == -1) {
4796                         cause = 0;
4797                 }
4798                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4799                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4800                 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4801                 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4802                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4803                 break;
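                /*
                 * Reviewer note (illustration, not in the original
                 * source): on 82598 each IVAR register holds four 8-bit
                 * entries; rx causes occupy entries 0-63 and tx causes
                 * 64-127, so e.g. rx queue 5 lands in IVAR(1), byte 1.
                 */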
4804 
4805         case ixgbe_mac_82599EB:
4806         case ixgbe_mac_X540:
4807                 if (cause == -1) {
4808                         /* other causes */
4809                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4810                         index = (intr_alloc_entry & 1) * 8;
4811                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4812                         ivar &= ~(0xFF << index);
4813                         ivar |= (msix_vector << index);
4814                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4815                 } else {
4816                         /* tx or rx causes */
4817                         msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4818                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4819                         ivar = IXGBE_READ_REG(hw,
4820                             IXGBE_IVAR(intr_alloc_entry >> 1));
4821                         ivar &= ~(0xFF << index);
4822                         ivar |= (msix_vector << index);
4823                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4824                             ivar);
4825                 }
4826                 break;
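                /*
                 * Reviewer note (illustration, not in the original
                 * source): on 82599/X540 each IVAR register covers two
                 * queues; for an even entry, the rx cause sits in bits
                 * 7:0 and tx in bits 15:8, while for an odd entry rx
                 * sits in bits 23:16 and tx in bits 31:24.
                 */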


4840  */
4841 static void
4842 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4843 {
4844         struct ixgbe_hw *hw = &ixgbe->hw;
4845         u32 ivar, index;
4846 
4847         switch (hw->mac.type) {
4848         case ixgbe_mac_82598EB:
4849                 if (cause == -1) {
4850                         cause = 0;
4851                 }
4852                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4853                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4854                 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4855                     (intr_alloc_entry & 0x3)));
4856                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4857                 break;
4858 
4859         case ixgbe_mac_82599EB:
4860         case ixgbe_mac_X540:
4861                 if (cause == -1) {
4862                         /* other causes */
4863                         index = (intr_alloc_entry & 1) * 8;
4864                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4865                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4866                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4867                 } else {
4868                         /* tx or rx causes */
4869                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4870                         ivar = IXGBE_READ_REG(hw,
4871                             IXGBE_IVAR(intr_alloc_entry >> 1));
4872                         ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4873                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4874                             ivar);
4875                 }
4876                 break;
4877 
4878         default:
4879                 break;
4880         }


4890  */
4891 static void
4892 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4893 {
4894         struct ixgbe_hw *hw = &ixgbe->hw;
4895         u32 ivar, index;
4896 
4897         switch (hw->mac.type) {
4898         case ixgbe_mac_82598EB:
4899                 if (cause == -1) {
4900                         cause = 0;
4901                 }
4902                 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4903                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4904                 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4905                     (intr_alloc_entry & 0x3)));
4906                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4907                 break;
4908 
4909         case ixgbe_mac_82599EB:
4910         case ixgbe_mac_X540:
4911                 if (cause == -1) {
4912                         /* other causes */
4913                         index = (intr_alloc_entry & 1) * 8;
4914                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4915                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4916                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4917                 } else {
4918                         /* tx or rx causes */
4919                         index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4920                         ivar = IXGBE_READ_REG(hw,
4921                             IXGBE_IVAR(intr_alloc_entry >> 1));
4922                         ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4923                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4924                             ivar);
4925                 }
4926                 break;
4927 
4928         default:
4929                 break;
4930         }


4933 /*
4934  * Convert the driver-maintained rx ring index to the rx ring index
4935  * used by the h/w.
4936  */
4937 static uint32_t
4938 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4939 {
4940 
4941         struct ixgbe_hw *hw = &ixgbe->hw;
4942         uint32_t rx_ring_per_group, hw_rx_index;
4943 
4944         if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4945             ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4946                 return (sw_rx_index);
4947         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4948                 switch (hw->mac.type) {
4949                 case ixgbe_mac_82598EB:
4950                         return (sw_rx_index);
4951 
4952                 case ixgbe_mac_82599EB:
4953                 case ixgbe_mac_X540:
4954                         return (sw_rx_index * 2);
4955 
4956                 default:
4957                         break;
4958                 }
4959         } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4960                 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4961 
4962                 switch (hw->mac.type) {
4963                 case ixgbe_mac_82598EB:
4964                         hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4965                             16 + (sw_rx_index % rx_ring_per_group);
4966                         return (hw_rx_index);
4967 
4968                 case ixgbe_mac_82599EB:
4969                 case ixgbe_mac_X540:
4970                         if (ixgbe->num_rx_groups > 32) {
4971                                 hw_rx_index = (sw_rx_index /
4972                                     rx_ring_per_group) * 2 +
4973                                     (sw_rx_index % rx_ring_per_group);
4974                         } else {
4975                                 hw_rx_index = (sw_rx_index /
4976                                     rx_ring_per_group) * 4 +
4977                                     (sw_rx_index % rx_ring_per_group);
4978                         }
4979                         return (hw_rx_index);
4980 
4981                 default:
4982                         break;
4983                 }
4984         }
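        /*
         * Reviewer note (illustration, not in the original source): in
         * VMDQ_RSS mode on 82599/X540 with 32 groups of 2 rings each,
         * sw index 5 (group 2, offset 1) maps into a pool of 4 h/w
         * queues, i.e. hw index 2 * 4 + 1 = 9.
         */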
4985 
4986         /*
4987          * Should never be reached; return a value to keep the compiler happy.
4988          */
4989         return (sw_rx_index);


5054  */
5055 static void
5056 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5057 {
5058         struct ixgbe_hw *hw = &ixgbe->hw;
5059         ixgbe_intr_vector_t *vect;      /* vector bitmap */
5060         int r_idx;      /* ring index */
5061         int v_idx;      /* vector index */
5062         uint32_t hw_index;
5063 
5064         /*
5065          * Clear any previous entries
5066          */
5067         switch (hw->mac.type) {
5068         case ixgbe_mac_82598EB:
5069                 for (v_idx = 0; v_idx < 25; v_idx++)
5070                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5071                 break;
5072 
5073         case ixgbe_mac_82599EB:
5074         case ixgbe_mac_X540:
5075                 for (v_idx = 0; v_idx < 64; v_idx++)
5076                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5077                 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5078                 break;
5079 
5080         default:
5081                 break;
5082         }
5083 
5084         /*
5085          * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5086          * tx rings[0] will use RTxQ[1].
5087          */
5088         if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5089                 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5090                 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5091                 return;
5092         }
5093 
5094         /*