8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 */
29
30 #include "ixgbe_sw.h"
31
32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 static char ixgbe_version[] = "ixgbe 1.1.7";
34
35 /*
36 * Local function prototypes
37 */
38 static int ixgbe_register_mac(ixgbe_t *);
39 static int ixgbe_identify_hardware(ixgbe_t *);
40 static int ixgbe_regs_map(ixgbe_t *);
41 static void ixgbe_init_properties(ixgbe_t *);
42 static int ixgbe_init_driver_settings(ixgbe_t *);
43 static void ixgbe_init_locks(ixgbe_t *);
44 static void ixgbe_destroy_locks(ixgbe_t *);
45 static int ixgbe_init(ixgbe_t *);
46 static int ixgbe_chip_start(ixgbe_t *);
47 static void ixgbe_chip_stop(ixgbe_t *);
274 0xFF8, /* maximum interrupt throttle rate */
275 0, /* minimum interrupt throttle rate */
276 200, /* default interrupt throttle rate */
277 64, /* maximum total msix vectors */
278 16, /* maximum number of ring vectors */
279 2, /* maximum number of other vectors */
280 (IXGBE_EICR_LSC
281 | IXGBE_EICR_GPI_SDP1
282 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283
284 (IXGBE_SDP1_GPIEN
285 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286
287 (IXGBE_FLAG_DCA_CAPABLE
288 | IXGBE_FLAG_RSS_CAPABLE
289 | IXGBE_FLAG_VMDQ_CAPABLE
290 | IXGBE_FLAG_RSC_CAPABLE
291 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 };
293
294 /*
295 * Module Initialization Functions.
296 */
297
298 int
299 _init(void)
300 {
301 int status;
302
303 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304
305 status = mod_install(&ixgbe_modlinkage);
306
307 if (status != DDI_SUCCESS) {
308 mac_fini_ops(&ixgbe_dev_ops);
309 }
310
311 return (status);
312 }
313
850 ixgbe->capab = &ixgbe_82598eb_cap;
851
852 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 }
857 break;
858
859 case ixgbe_mac_82599EB:
860 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 ixgbe->capab = &ixgbe_82599eb_cap;
862
863 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 }
868 break;
869
870 default:
871 IXGBE_DEBUGLOG_1(ixgbe,
872 "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 hw->mac.type);
874 return (IXGBE_FAILURE);
875 }
876
877 return (IXGBE_SUCCESS);
878 }
879
880 /*
881 * ixgbe_regs_map - Map the device registers.
882 *
883 */
884 static int
885 ixgbe_regs_map(ixgbe_t *ixgbe)
886 {
887 dev_info_t *devinfo = ixgbe->dip;
888 struct ixgbe_hw *hw = &ixgbe->hw;
889 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1183 */
1184 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 /*
1186 * Some PCI-E parts fail the first check due to
1187 * the link being in sleep state. Call it again;
1188 * if it fails a second time, it's a real issue.
1189 */
1190 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 ixgbe_error(ixgbe,
1192 "Invalid NVM checksum. Please contact "
1193 "the vendor to update the NVM.");
1194 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 goto init_fail;
1196 }
1197 }
1198
1199 /*
1200 * Set up the default flow control thresholds; enable/disable
1201 * and the flow control type are controlled by ixgbe.conf
1202 */
1203 hw->fc.high_water = DEFAULT_FCRTH;
1204 hw->fc.low_water = DEFAULT_FCRTL;
1205 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 hw->fc.send_xon = B_TRUE;
1207
1208 /*
1209 * Initialize link settings
1210 */
1211 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212
1213 /*
1214 * Initialize the chipset hardware
1215 */
1216 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 goto init_fail;
1219 }
1220
1221 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 goto init_fail;
1223 }
1224
2079 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080
2081 rx_data->rbd_next = 0;
2082 rx_data->lro_first = 0;
2083
2084 /*
2085 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 * PTHRESH=32 descriptors (half the internal cache)
2087 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 * WTHRESH defaults to 1 (writeback each descriptor)
2089 */
2090 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092
2093 /* Not a valid value for 82599 */
2094 if (hw->mac.type < ixgbe_mac_82599EB) {
2095 reg_val |= 0x0020; /* PTHRESH = 32 */
2096 }
2097 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098
2099 if (hw->mac.type == ixgbe_mac_82599EB) {
2100 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 }
2104
2105 /*
2106 * Setup the Split and Replication Receive Control Register.
2107 * Set the rx buffer size and the advanced descriptor type.
2108 */
2109 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
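	/* Drop packets for this ring when no rx descriptors are available */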
2111 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 }
2114
2115 static void
2116 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 {
2118 ixgbe_rx_ring_t *rx_ring;
2119 struct ixgbe_hw *hw = &ixgbe->hw;
2316 */
2317 tx_ring->tbd_head_wb = (uint32_t *)
2318 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 *tx_ring->tbd_head_wb = 0;
2320
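		/*
		 * Program the DMA address of the write-back area into
		 * TDWBAL/TDWBAH; IXGBE_TDWBAL_HEAD_WB_ENABLE in the
		 * low half enables the feature.
		 */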
2321 buf_low = (uint32_t)
2322 (tx_ring->tbd_area.dma_address + size);
2323 buf_high = (uint32_t)
2324 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325
2326 /* Set the head write-back enable bit */
2327 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328
2329 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331
2332 /*
2333 * Turn off relaxed ordering for head write-back, or it
2334 * will cause problems with tx recycling
2335 */
2336 reg_val = IXGBE_READ_REG(hw,
2337 IXGBE_DCA_TXCTRL(tx_ring->index));
2338 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 IXGBE_WRITE_REG(hw,
2340 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2341 } else {
2342 tx_ring->tbd_head_wb = NULL;
2343 }
2344
2345 tx_ring->tbd_head = 0;
2346 tx_ring->tbd_tail = 0;
2347 tx_ring->tbd_free = tx_ring->ring_size;
2348
2349 if (ixgbe->tx_ring_init == B_TRUE) {
2350 tx_ring->tcb_head = 0;
2351 tx_ring->tcb_tail = 0;
2352 tx_ring->tcb_free = tx_ring->free_list_size;
2353 }
2354
2355 /*
2356 * Initialize the s/w context structure
2357 */
2358 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 }
2360
2361 static void
2369
2370 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 tx_ring = &ixgbe->tx_rings[i];
2372 ixgbe_setup_tx_ring(tx_ring);
2373 }
2374
2375 /*
2376 * Setup the per-ring statistics mapping.
2377 */
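	/*
	 * Each TQSMR/TQSM register holds the statistics mapping for
	 * four tx queues, one byte per queue, so the accumulated
	 * mapping word is flushed to hardware after every fourth
	 * ring, with a final write below for any remainder.
	 */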
2378 ring_mapping = 0;
2379 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 if ((i & 0x3) == 0x3) {
2382 switch (hw->mac.type) {
2383 case ixgbe_mac_82598EB:
2384 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 ring_mapping);
2386 break;
2387
2388 case ixgbe_mac_82599EB:
2389 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 ring_mapping);
2391 break;
2392
2393 default:
2394 break;
2395 }
2396
2397 ring_mapping = 0;
2398 }
2399 }
2400 if (i & 0x3) {
2401 switch (hw->mac.type) {
2402 case ixgbe_mac_82598EB:
2403 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 break;
2405
2406 case ixgbe_mac_82599EB:
2407 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 break;
2409
2410 default:
2411 break;
2412 }
2413 }
2414
2415 /*
2416 * Enable CRC appending and TX padding (for short tx frames)
2417 */
2418 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421
2422 /*
2423 * enable DMA for 82599 parts
2424 */
2425 if (hw->mac.type == ixgbe_mac_82599EB) {
2426 /* DMATXCTL.TE must be set after all Tx config is complete */
2427 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 reg_val |= IXGBE_DMATXCTL_TE;
2429 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2430 }
2431
2432 /*
2433 * Enable the tx queues.
2434 * For 82599 this must be done after DMATXCTL.TE is set.
2435 */
2436 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 tx_ring = &ixgbe->tx_rings[i];
2438 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 }
2442 }
2443
2444 /*
2445 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 */
2447 static void
2448 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 {
2509 {
2510 struct ixgbe_hw *hw = &ixgbe->hw;
2511 uint32_t vmdctl, i, vtctl;
2512
2513 /*
2514 * Setup the VMDq Control register, enable VMDq based on
2515 * packet destination MAC address:
2516 */
2517 switch (hw->mac.type) {
2518 case ixgbe_mac_82598EB:
2519 /*
2520 * VMDq Enable = 1;
2521 * VMDq Filter = 0; MAC filtering
2522 * Default VMDq output index = 0;
2523 */
2524 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 break;
2527
2528 case ixgbe_mac_82599EB:
2529 /*
2530 * Enable VMDq-only.
2531 */
2532 vmdctl = IXGBE_MRQC_VMDQEN;
2533 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534
2535 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 }
2539
2540 /*
2541 * Enable Virtualization and Replication.
2542 */
2543 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545
2546 /*
2547 * Enable receiving packets to all VFs
2548 */
2602 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611
2612 /*
2613 * Enable and Setup VMDq
2614 * VMDq Filter = 0; MAC filtering
2615 * Default VMDq output index = 0;
2616 */
2617 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 break;
2620
2621 case ixgbe_mac_82599EB:
2622 /*
2623 * Enable RSS & Setup RSS Hash functions
2624 */
2625 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634
2635 /*
2636 * Enable VMDq+RSS.
2637 */
2638 if (ixgbe->num_rx_groups > 32) {
2639 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 } else {
2641 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2647 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 }
2650 break;
2651
2652 default:
2653 break;
2654
2655 }
2656
2657 /*
2658 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 * It is an adapter hardware limitation that Packet Checksum is
2660 * mutually exclusive with RSS.
2661 */
2662 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666
2667 if (hw->mac.type == ixgbe_mac_82599EB) {
2668 /*
2669 * Enable Virtualization and Replication.
2670 */
2671 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673
2674 /*
2675 * Enable receiving packets to all VFs
2676 */
2677 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 }
2680 }
2681
2682 /*
2683 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 */
2685 static void
2686 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 {
2822 * and save them in the hardware registers.
2823 */
2824 static void
2825 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 {
2827 uint8_t *mc_addr_list;
2828 uint32_t mc_addr_count;
2829 struct ixgbe_hw *hw = &ixgbe->hw;
2830
2831 ASSERT(mutex_owned(&ixgbe->gen_lock));
2832
2833 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834
2835 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 mc_addr_count = ixgbe->mcast_count;
2837
2838 /*
2839 * Update the multicast addresses to the MTA registers
2840 */
2841 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 ixgbe_mc_table_itr);
2843 }
2844
2845 /*
2846 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 *
2848 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849 * Different chipsets may have different allowed configuration of vmdq and rss.
2850 */
2851 static void
2852 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 {
2854 struct ixgbe_hw *hw = &ixgbe->hw;
2855 uint32_t ring_per_group;
2856
2857 switch (hw->mac.type) {
2858 case ixgbe_mac_82598EB:
2859 /*
2860 * 82598 supports the following combination:
2861 * vmdq no. x rss no.
2862 * [5..16] x 1
2863 * [1..4] x [1..16]
2864 * However, 8 rss queues per pool (vmdq) are sufficient
2865 * for most cases.
2866 */
2867 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 if (ixgbe->num_rx_groups > 4) {
2869 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 } else {
2871 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 min(8, ring_per_group);
2873 }
2874
2875 break;
2876
2877 case ixgbe_mac_82599EB:
2878 /*
2879 * 82599 supports the following combination:
2880 * vmdq no. x rss no.
2881 * [33..64] x [1..2]
2882 * [2..32] x [1..4]
2883 * 1 x [1..16]
2884 * However, 8 rss queues per pool (vmdq) are sufficient
2885 * for most cases.
2886 */
2887 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 if (ixgbe->num_rx_groups == 1) {
2889 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 } else if (ixgbe->num_rx_groups <= 32) {
2891 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 min(4, ring_per_group);
2893 } else if (ixgbe->num_rx_groups <= 64) {
2894 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 min(2, ring_per_group);
2896 }
2897 break;
2898
2899 default:
2900 break;
2901 }
2902
2903 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904
2905 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3028 * 1 = force interrupt type MSI-X
3029 * 2 = force interrupt type MSI
3030 * 3 = force interrupt type Legacy
3031 */
3032 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034
3035 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 0, 1, DEFAULT_LSO_ENABLE);
3041 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 0, 1, DEFAULT_LRO_ENABLE);
3043 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047
3048 /* Head Write Back not recommended for 82599 */
3049 if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 ixgbe->tx_head_wb_enable = B_FALSE;
3051 }
3052
3053 /*
3054 * ixgbe LSO needs tx h/w checksum support.
3055 * LSO will be disabled if tx h/w checksum is not
3056 * enabled.
3057 */
3058 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 ixgbe->lso_enable = B_FALSE;
3060 }
3061
3062 /*
3063 * ixgbe LRO needs rx h/w checksum support.
3064 * LRO will be disabled if rx h/w checksum is not
3065 * enabled.
3066 */
3067 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 ixgbe->lro_enable = B_FALSE;
3069 }
3070
3071 /*
3072 * ixgbe LRO is only supported on the 82599 for now
3073 */
3074 if (hw->mac.type != ixgbe_mac_82599EB) {
3075 ixgbe->lro_enable = B_FALSE;
3076 }
3077 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 DEFAULT_TX_COPY_THRESHOLD);
3080 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089
3090 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 DEFAULT_RX_COPY_THRESHOLD);
3093 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 DEFAULT_RX_LIMIT_PER_INTR);
3096
3097 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 ixgbe->capab->min_intr_throttle,
3099 ixgbe->capab->max_intr_throttle,
3100 ixgbe->capab->def_intr_throttle);
3101 /*
3102 * 82599 requires that the interrupt throttling rate be
3103 * a multiple of 8. This is enforced by the register
3104 * definition.
3105 */
3106 if (hw->mac.type == ixgbe_mac_82599EB)
3107 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 }
3109
3110 static void
3111 ixgbe_init_params(ixgbe_t *ixgbe)
3112 {
3113 ixgbe->param_en_10000fdx_cap = 1;
3114 ixgbe->param_en_1000fdx_cap = 1;
3115 ixgbe->param_en_100fdx_cap = 1;
3116 ixgbe->param_adv_10000fdx_cap = 1;
3117 ixgbe->param_adv_1000fdx_cap = 1;
3118 ixgbe->param_adv_100fdx_cap = 1;
3119
3120 ixgbe->param_pause_cap = 1;
3121 ixgbe->param_asym_pause_cap = 1;
3122 ixgbe->param_rem_fault = 0;
3123
3124 ixgbe->param_adv_autoneg_cap = 1;
3125 ixgbe->param_adv_pause_cap = 1;
3126 ixgbe->param_adv_asym_pause_cap = 1;
3212 /*
3213 * ixgbe_driver_link_check - Link status processing.
3214 *
3215 * This function can be called in both kernel context and interrupt context
3216 */
3217 static void
3218 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 {
3220 struct ixgbe_hw *hw = &ixgbe->hw;
3221 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 boolean_t link_up = B_FALSE;
3223 boolean_t link_changed = B_FALSE;
3224
3225 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226
3227 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 if (link_up) {
3229 ixgbe->link_check_complete = B_TRUE;
3230
3231 /* Link is up, enable flow control settings */
3232 (void) ixgbe_fc_enable(hw, 0);
3233
3234 /*
3235 * The Link is up, check whether it was marked as down earlier
3236 */
3237 if (ixgbe->link_state != LINK_STATE_UP) {
3238 switch (speed) {
3239 case IXGBE_LINK_SPEED_10GB_FULL:
3240 ixgbe->link_speed = SPEED_10GB;
3241 break;
3242 case IXGBE_LINK_SPEED_1GB_FULL:
3243 ixgbe->link_speed = SPEED_1GB;
3244 break;
3245 case IXGBE_LINK_SPEED_100_FULL:
3246 ixgbe->link_speed = SPEED_100;
3247 }
3248 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 ixgbe->link_state = LINK_STATE_UP;
3250 link_changed = B_TRUE;
3251 }
3252 } else {
3731 eiac = 0;
3732
3733 /*
3734 * General purpose interrupt enable.
3735 * For 82599, extended interrupt automask is enabled
3736 * only in MSI or MSI-X mode.
3737 */
3738 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 gpie |= IXGBE_GPIE_EIAME;
3741 }
3742 }
3743
3744 /* Enable specific "other" interrupt types */
3745 switch (hw->mac.type) {
3746 case ixgbe_mac_82598EB:
3747 gpie |= ixgbe->capab->other_gpie;
3748 break;
3749
3750 case ixgbe_mac_82599EB:
3751 gpie |= ixgbe->capab->other_gpie;
3752
3753 /* Enable RSC Delay 8us when LRO enabled */
3754 if (ixgbe->lro_enable) {
3755 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 }
3757 break;
3758
3759 default:
3760 break;
3761 }
3762
3763 /* write to interrupt control registers */
3764 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 IXGBE_WRITE_FLUSH(hw);
3769 }
3770
3924 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925 &atlas);
3926 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928 atlas);
3929
3930 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931 &atlas);
3932 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934 atlas);
3935
3936 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937 &atlas);
3938 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940 atlas);
3941 break;
3942
3943 case ixgbe_mac_82599EB:
3944 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945 reg |= (IXGBE_AUTOC_FLU |
3946 IXGBE_AUTOC_10G_KX4);
3947 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948
3949 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 B_FALSE, B_TRUE);
3951 break;
3952
3953 default:
3954 break;
3955 }
3956 }
3957
3958 #pragma inline(ixgbe_intr_rx_work)
3959 /*
3960 * ixgbe_intr_rx_work - RX processing of ISR.
3961 */
3962 static void
3963 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4142 * Recycle the tx descriptors
4143 */
4144 tx_ring = &ixgbe->tx_rings[0];
4145 tx_ring->tx_recycle(tx_ring);
4146
4147 /*
4148 * Schedule the re-transmit
4149 */
4150 tx_reschedule = (tx_ring->reschedule &&
4151 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 }
4153
4154 /* any interrupt type other than tx/rx */
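		/*
		 * The "other" causes are masked while they are being
		 * serviced: the 82598 clears them from the driver's
		 * EIMS shadow, while the 82599 disables them through
		 * EIMC.
		 */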
4155 if (eicr & ixgbe->capab->other_intr) {
4156 switch (hw->mac.type) {
4157 case ixgbe_mac_82598EB:
4158 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 break;
4160
4161 case ixgbe_mac_82599EB:
4162 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 break;
4165
4166 default:
4167 break;
4168 }
4169 ixgbe_intr_other_work(ixgbe, eicr);
4170 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 }
4172
4173 mutex_exit(&ixgbe->gen_lock);
4174
4175 result = DDI_INTR_CLAIMED;
4176 } else {
4177 mutex_exit(&ixgbe->gen_lock);
4178
4179 /*
4180 * No interrupt cause bits set: don't claim this interrupt.
4181 */
4235 if (eicr & 0x1) {
4236 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 }
4238
4239 /*
4240 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 */
4242 if (eicr & 0x2) {
4243 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 }
4245
4246 /* any interrupt type other than tx/rx */
4247 if (eicr & ixgbe->capab->other_intr) {
4248 mutex_enter(&ixgbe->gen_lock);
4249 switch (hw->mac.type) {
4250 case ixgbe_mac_82598EB:
4251 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 break;
4253
4254 case ixgbe_mac_82599EB:
4255 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 break;
4258
4259 default:
4260 break;
4261 }
4262 ixgbe_intr_other_work(ixgbe, eicr);
4263 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 mutex_exit(&ixgbe->gen_lock);
4265 }
4266
4267 /* re-enable the interrupts which were automasked */
4268 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269
4270 return (DDI_INTR_CLAIMED);
4271 }
4272
4273 /*
4274 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4314 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 DDI_FM_OK) {
4316 ddi_fm_service_impact(ixgbe->dip,
4317 DDI_SERVICE_DEGRADED);
4318 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 return (DDI_INTR_CLAIMED);
4320 }
4321
4322 /*
4323 * Check "other" cause bits: any interrupt type other than tx/rx
4324 */
4325 if (eicr & ixgbe->capab->other_intr) {
4326 mutex_enter(&ixgbe->gen_lock);
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_82598EB:
4329 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 ixgbe_intr_other_work(ixgbe, eicr);
4331 break;
4332
4333 case ixgbe_mac_82599EB:
4334 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 ixgbe_intr_other_work(ixgbe, eicr);
4336 break;
4337
4338 default:
4339 break;
4340 }
4341 mutex_exit(&ixgbe->gen_lock);
4342 }
4343
4344 /* re-enable the interrupts which were automasked */
4345 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 }
4347
4348 return (DDI_INTR_CLAIMED);
4349 }
4350
4351 /*
4352 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 *
4714 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715 int8_t cause)
4716 {
4717 struct ixgbe_hw *hw = &ixgbe->hw;
4718 u32 ivar, index;
4719
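	/*
	 * Each IVAR register packs four 8-bit vector entries, with
	 * IXGBE_IVAR_ALLOC_VAL marking an entry valid.  On the 82598
	 * the entries are indexed linearly by cause (0 = rx, 1 = tx)
	 * and queue; on the 82599 each IVAR holds the rx/tx causes
	 * of two queues, and IVAR_MISC holds the "other" causes.
	 */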
4720 switch (hw->mac.type) {
4721 case ixgbe_mac_82598EB:
4722 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 if (cause == -1) {
4724 cause = 0;
4725 }
4726 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 break;
4732
4733 case ixgbe_mac_82599EB:
4734 if (cause == -1) {
4735 /* other causes */
4736 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 index = (intr_alloc_entry & 1) * 8;
4738 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 ivar &= ~(0xFF << index);
4740 ivar |= (msix_vector << index);
4741 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 } else {
4743 /* tx or rx causes */
4744 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 ivar = IXGBE_READ_REG(hw,
4747 IXGBE_IVAR(intr_alloc_entry >> 1));
4748 ivar &= ~(0xFF << index);
4749 ivar |= (msix_vector << index);
4750 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 ivar);
4752 }
4753 break;
4767 */
4768 static void
4769 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 {
4771 struct ixgbe_hw *hw = &ixgbe->hw;
4772 u32 ivar, index;
4773
4774 switch (hw->mac.type) {
4775 case ixgbe_mac_82598EB:
4776 if (cause == -1) {
4777 cause = 0;
4778 }
4779 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 (intr_alloc_entry & 0x3)));
4783 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 break;
4785
4786 case ixgbe_mac_82599EB:
4787 if (cause == -1) {
4788 /* other causes */
4789 index = (intr_alloc_entry & 1) * 8;
4790 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 } else {
4794 /* tx or rx causes */
4795 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 ivar = IXGBE_READ_REG(hw,
4797 IXGBE_IVAR(intr_alloc_entry >> 1));
4798 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 ivar);
4801 }
4802 break;
4803
4804 default:
4805 break;
4806 }
4816 */
4817 static void
4818 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 {
4820 struct ixgbe_hw *hw = &ixgbe->hw;
4821 u32 ivar, index;
4822
4823 switch (hw->mac.type) {
4824 case ixgbe_mac_82598EB:
4825 if (cause == -1) {
4826 cause = 0;
4827 }
4828 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4831 (intr_alloc_entry & 0x3)));
4832 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 break;
4834
4835 case ixgbe_mac_82599EB:
4836 if (cause == -1) {
4837 /* other causes */
4838 index = (intr_alloc_entry & 1) * 8;
4839 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 } else {
4843 /* tx or rx causes */
4844 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 ivar = IXGBE_READ_REG(hw,
4846 IXGBE_IVAR(intr_alloc_entry >> 1));
4847 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 ivar);
4850 }
4851 break;
4852
4853 default:
4854 break;
4855 }
4858 /*
4859 * Convert the driver-maintained rx ring index to the rx ring
4860 * index used by the h/w.
4861 */
4862 static uint32_t
4863 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 {
4865
4866 struct ixgbe_hw *hw = &ixgbe->hw;
4867 uint32_t rx_ring_per_group, hw_rx_index;
4868
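	/*
	 * In VMDq and VMDq+RSS modes the hardware rx queues are laid
	 * out in fixed-size blocks per group (16 per group on 82598;
	 * 2 or 4 per group on 82599 depending on the number of
	 * groups), so the software ring index must be scaled into
	 * its group's block of hardware queues.
	 */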
4869 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 return (sw_rx_index);
4872 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 switch (hw->mac.type) {
4874 case ixgbe_mac_82598EB:
4875 return (sw_rx_index);
4876
4877 case ixgbe_mac_82599EB:
4878 return (sw_rx_index * 2);
4879
4880 default:
4881 break;
4882 }
4883 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885
4886 switch (hw->mac.type) {
4887 case ixgbe_mac_82598EB:
4888 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 16 + (sw_rx_index % rx_ring_per_group);
4890 return (hw_rx_index);
4891
4892 case ixgbe_mac_82599EB:
4893 if (ixgbe->num_rx_groups > 32) {
4894 hw_rx_index = (sw_rx_index /
4895 rx_ring_per_group) * 2 +
4896 (sw_rx_index % rx_ring_per_group);
4897 } else {
4898 hw_rx_index = (sw_rx_index /
4899 rx_ring_per_group) * 4 +
4900 (sw_rx_index % rx_ring_per_group);
4901 }
4902 return (hw_rx_index);
4903
4904 default:
4905 break;
4906 }
4907 }
4908
4909 /*
4910 * Should never be reached; return only to keep the compiler happy.
4911 */
4912 return (sw_rx_index);
4977 */
4978 static void
4979 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 {
4981 struct ixgbe_hw *hw = &ixgbe->hw;
4982 ixgbe_intr_vector_t *vect; /* vector bitmap */
4983 int r_idx; /* ring index */
4984 int v_idx; /* vector index */
4985 uint32_t hw_index;
4986
4987 /*
4988 * Clear any previous entries
4989 */
4990 switch (hw->mac.type) {
4991 case ixgbe_mac_82598EB:
4992 for (v_idx = 0; v_idx < 25; v_idx++)
4993 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 break;
4995
4996 case ixgbe_mac_82599EB:
4997 for (v_idx = 0; v_idx < 64; v_idx++)
4998 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 break;
5001
5002 default:
5003 break;
5004 }
5005
5006 /*
5007 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
5008 * tx rings[0] will use RTxQ[1].
5009 */
5010 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 return;
5014 }
5015
5016 /*
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #include "ixgbe_sw.h"
33
34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
35 static char ixgbe_version[] = "ixgbe 1.1.7";
36
37 /*
38 * Local function prototypes
39 */
40 static int ixgbe_register_mac(ixgbe_t *);
41 static int ixgbe_identify_hardware(ixgbe_t *);
42 static int ixgbe_regs_map(ixgbe_t *);
43 static void ixgbe_init_properties(ixgbe_t *);
44 static int ixgbe_init_driver_settings(ixgbe_t *);
45 static void ixgbe_init_locks(ixgbe_t *);
46 static void ixgbe_destroy_locks(ixgbe_t *);
47 static int ixgbe_init(ixgbe_t *);
48 static int ixgbe_chip_start(ixgbe_t *);
49 static void ixgbe_chip_stop(ixgbe_t *);
276 0xFF8, /* maximum interrupt throttle rate */
277 0, /* minimum interrupt throttle rate */
278 200, /* default interrupt throttle rate */
279 64, /* maximum total msix vectors */
280 16, /* maximum number of ring vectors */
281 2, /* maximum number of other vectors */
282 (IXGBE_EICR_LSC
283 | IXGBE_EICR_GPI_SDP1
284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
285
286 (IXGBE_SDP1_GPIEN
287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
288
289 (IXGBE_FLAG_DCA_CAPABLE
290 | IXGBE_FLAG_RSS_CAPABLE
291 | IXGBE_FLAG_VMDQ_CAPABLE
292 | IXGBE_FLAG_RSC_CAPABLE
293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
294 };
295
296 static adapter_info_t ixgbe_X540_cap = {
297 128, /* maximum number of rx queues */
298 1, /* minimum number of rx queues */
299 128, /* default number of rx queues */
300 64, /* maximum number of rx groups */
301 1, /* minimum number of rx groups */
302 1, /* default number of rx groups */
303 128, /* maximum number of tx queues */
304 1, /* minimum number of tx queues */
305 8, /* default number of tx queues */
306 15500, /* maximum MTU size */
307 0xFF8, /* maximum interrupt throttle rate */
308 0, /* minimum interrupt throttle rate */
309 200, /* default interrupt throttle rate */
310 64, /* maximum total msix vectors */
311 16, /* maximum number of ring vectors */
312 2, /* maximum number of other vectors */
313 (IXGBE_EICR_LSC
314 | IXGBE_EICR_GPI_SDP1
315 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
316
317 (IXGBE_SDP1_GPIEN
318 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
319
320 (IXGBE_FLAG_DCA_CAPABLE
321 | IXGBE_FLAG_RSS_CAPABLE
322 | IXGBE_FLAG_VMDQ_CAPABLE
323 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
324 };
325
326 /*
327 * Module Initialization Functions.
328 */
329
330 int
331 _init(void)
332 {
333 int status;
334
335 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
336
337 status = mod_install(&ixgbe_modlinkage);
338
339 if (status != DDI_SUCCESS) {
340 mac_fini_ops(&ixgbe_dev_ops);
341 }
342
343 return (status);
344 }
345
882 ixgbe->capab = &ixgbe_82598eb_cap;
883
884 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
885 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
886 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
887 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
888 }
889 break;
890
891 case ixgbe_mac_82599EB:
892 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
893 ixgbe->capab = &ixgbe_82599eb_cap;
894
895 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
896 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
897 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
898 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
899 }
900 break;
901
902 case ixgbe_mac_X540:
903 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
904 ixgbe->capab = &ixgbe_X540_cap;
905 /*
906 * For now, the X540 is fully described by its capab structure.
907 * As other X540 variants show up, this may change.
908 */
909 break;
910
911 default:
912 IXGBE_DEBUGLOG_1(ixgbe,
913 "adapter not supported in ixgbe_identify_hardware(): %d\n",
914 hw->mac.type);
915 return (IXGBE_FAILURE);
916 }
917
918 return (IXGBE_SUCCESS);
919 }
920
921 /*
922 * ixgbe_regs_map - Map the device registers.
923 *
924 */
925 static int
926 ixgbe_regs_map(ixgbe_t *ixgbe)
927 {
928 dev_info_t *devinfo = ixgbe->dip;
929 struct ixgbe_hw *hw = &ixgbe->hw;
930 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1224 */
1225 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1226 /*
1227 * Some PCI-E parts fail the first check due to
1228 * the link being in sleep state. Call it again;
1229 * if it fails a second time, it's a real issue.
1230 */
1231 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1232 ixgbe_error(ixgbe,
1233 "Invalid NVM checksum. Please contact "
1234 "the vendor to update the NVM.");
1235 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1236 goto init_fail;
1237 }
1238 }
1239
1240 /*
1241 * Set up the default flow control thresholds; enable/disable
1242 * and the flow control type are controlled by ixgbe.conf
1243 */
1244 hw->fc.high_water[0] = DEFAULT_FCRTH;
1245 hw->fc.low_water[0] = DEFAULT_FCRTL;
1246 hw->fc.pause_time = DEFAULT_FCPAUSE;
1247 hw->fc.send_xon = B_TRUE;
1248
1249 /*
1250 * Initialize link settings
1251 */
1252 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1253
1254 /*
1255 * Initialize the chipset hardware
1256 */
1257 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1258 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1259 goto init_fail;
1260 }
1261
1262 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1263 goto init_fail;
1264 }
1265
2120 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2121
2122 rx_data->rbd_next = 0;
2123 rx_data->lro_first = 0;
2124
2125 /*
2126 * Setup the Receive Descriptor Control Register (RXDCTL)
2127 * PTHRESH=32 descriptors (half the internal cache)
2128 * HTHRESH=0 descriptors (to minimize latency on fetch)
2129 * WTHRESH defaults to 1 (writeback each descriptor)
2130 */
2131 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2132 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2133
2134 /* Not a valid value for 82599 */
2135 if (hw->mac.type < ixgbe_mac_82599EB) {
2136 reg_val |= 0x0020; /* PTHRESH = 32 */
2137 }
2138 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2139
2140 if (hw->mac.type >= ixgbe_mac_82599EB) {
2141 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2142 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2143 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2144 }
2145
2146 /*
2147 * Setup the Split and Replication Receive Control Register.
2148 * Set the rx buffer size and the advanced descriptor type.
2149 */
2150 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2151 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
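	/* Drop packets for this ring when no rx descriptors are available */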
2152 reg_val |= IXGBE_SRRCTL_DROP_EN;
2153 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2154 }
2155
2156 static void
2157 ixgbe_setup_rx(ixgbe_t *ixgbe)
2158 {
2159 ixgbe_rx_ring_t *rx_ring;
2160 struct ixgbe_hw *hw = &ixgbe->hw;
2357 */
2358 tx_ring->tbd_head_wb = (uint32_t *)
2359 ((uintptr_t)tx_ring->tbd_area.address + size);
2360 *tx_ring->tbd_head_wb = 0;
2361
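		/*
		 * Program the DMA address of the write-back area into
		 * TDWBAL/TDWBAH; IXGBE_TDWBAL_HEAD_WB_ENABLE in the
		 * low half enables the feature.
		 */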
2362 buf_low = (uint32_t)
2363 (tx_ring->tbd_area.dma_address + size);
2364 buf_high = (uint32_t)
2365 ((tx_ring->tbd_area.dma_address + size) >> 32);
2366
2367 /* Set the head write-back enable bit */
2368 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2369
2370 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2371 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2372
2373 /*
2374 * Turn off relaxed ordering for head write-back, or it
2375 * will cause problems with tx recycling
2376 */
2377
2378 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2379 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2380 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2381 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2382 if (hw->mac.type == ixgbe_mac_82598EB) {
2383 IXGBE_WRITE_REG(hw,
2384 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2385 } else {
2386 IXGBE_WRITE_REG(hw,
2387 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2388 }
2389 } else {
2390 tx_ring->tbd_head_wb = NULL;
2391 }
2392
2393 tx_ring->tbd_head = 0;
2394 tx_ring->tbd_tail = 0;
2395 tx_ring->tbd_free = tx_ring->ring_size;
2396
2397 if (ixgbe->tx_ring_init == B_TRUE) {
2398 tx_ring->tcb_head = 0;
2399 tx_ring->tcb_tail = 0;
2400 tx_ring->tcb_free = tx_ring->free_list_size;
2401 }
2402
2403 /*
2404 * Initialize the s/w context structure
2405 */
2406 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2407 }
2408
2409 static void
2417
2418 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2419 tx_ring = &ixgbe->tx_rings[i];
2420 ixgbe_setup_tx_ring(tx_ring);
2421 }
2422
2423 /*
2424 * Setup the per-ring statistics mapping.
2425 */
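	/*
	 * Each TQSMR/TQSM register holds the statistics mapping for
	 * four tx queues, one byte per queue, so the accumulated
	 * mapping word is flushed to hardware after every fourth
	 * ring, with a final write below for any remainder.
	 */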
2426 ring_mapping = 0;
2427 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2428 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2429 if ((i & 0x3) == 0x3) {
2430 switch (hw->mac.type) {
2431 case ixgbe_mac_82598EB:
2432 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2433 ring_mapping);
2434 break;
2435
2436 case ixgbe_mac_82599EB:
2437 case ixgbe_mac_X540:
2438 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2439 ring_mapping);
2440 break;
2441
2442 default:
2443 break;
2444 }
2445
2446 ring_mapping = 0;
2447 }
2448 }
2449 if (i & 0x3) {
2450 switch (hw->mac.type) {
2451 case ixgbe_mac_82598EB:
2452 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2453 break;
2454
2455 case ixgbe_mac_82599EB:
2456 case ixgbe_mac_X540:
2457 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2458 break;
2459
2460 default:
2461 break;
2462 }
2463 }
2464
2465 /*
2466 * Enable CRC appending and TX padding (for short tx frames)
2467 */
2468 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2469 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2470 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2471
2472 /*
2473 * enable DMA for 82599 and X540 parts
2474 */
2475 if (hw->mac.type >= ixgbe_mac_82599EB) {
2476 /* DMATXCTL.TE must be set after all Tx config is complete */
2477 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2478 reg_val |= IXGBE_DMATXCTL_TE;
2479 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2480
2481 /* Disable arbiter to set MTQC */
2482 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2483 reg_val |= IXGBE_RTTDCS_ARBDIS;
2484 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2485 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2486 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2487 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2488 }
2489
2490 /*
2491 * Enable the tx queues.
2492 * For 82599 and X540 this must be done after DMATXCTL.TE is set.
2493 */
2494 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2495 tx_ring = &ixgbe->tx_rings[i];
2496 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2497 reg_val |= IXGBE_TXDCTL_ENABLE;
2498 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2499 }
2500 }
2501
2502 /*
2503 * ixgbe_setup_rss - Setup receive-side scaling feature.
2504 */
2505 static void
2506 ixgbe_setup_rss(ixgbe_t *ixgbe)
2507 {
2567 {
2568 struct ixgbe_hw *hw = &ixgbe->hw;
2569 uint32_t vmdctl, i, vtctl;
2570
2571 /*
2572 * Setup the VMDq Control register, enable VMDq based on
2573 * packet destination MAC address:
2574 */
2575 switch (hw->mac.type) {
2576 case ixgbe_mac_82598EB:
2577 /*
2578 * VMDq Enable = 1;
2579 * VMDq Filter = 0; MAC filtering
2580 * Default VMDq output index = 0;
2581 */
2582 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2583 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2584 break;
2585
2586 case ixgbe_mac_82599EB:
2587 case ixgbe_mac_X540:
2588 /*
2589 * Enable VMDq-only.
2590 */
2591 vmdctl = IXGBE_MRQC_VMDQEN;
2592 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2593
2594 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2595 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2596 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2597 }
2598
2599 /*
2600 * Enable Virtualization and Replication.
2601 */
2602 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2603 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2604
2605 /*
2606 * Enable receiving packets to all VFs
2607 */
2661 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2662 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2663 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2664 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2665 IXGBE_MRQC_RSS_FIELD_IPV6 |
2666 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2667 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2668 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2669 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2670
2671 /*
2672 * Enable and Setup VMDq
2673 * VMDq Filter = 0; MAC filtering
2674 * Default VMDq output index = 0;
2675 */
2676 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2677 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2678 break;
2679
2680 case ixgbe_mac_82599EB:
2681 case ixgbe_mac_X540:
2682 /*
2683 * Enable RSS & Setup RSS Hash functions
2684 */
2685 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2686 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2687 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2688 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2689 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2690 IXGBE_MRQC_RSS_FIELD_IPV6 |
2691 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2692 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2693 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2694
2695 /*
2696 * Enable VMDq+RSS.
2697 */
2698 if (ixgbe->num_rx_groups > 32) {
2699 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2700 } else {
2701 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2707 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2708 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2709 }
2710 break;
2711
2712 default:
2713 break;
2714
2715 }
2716
2717 /*
2718 * Disable Packet Checksum to enable RSS for multiple receive queues.
2719 * It is an adapter hardware limitation that Packet Checksum is
2720 * mutually exclusive with RSS.
2721 */
2722 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2723 rxcsum |= IXGBE_RXCSUM_PCSD;
2724 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2725 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2726
2727 if (hw->mac.type >= ixgbe_mac_82599EB) {
2728 /*
2729 * Enable Virtualization and Replication.
2730 */
2731 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2732 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2733
2734 /*
2735 * Enable receiving packets to all VFs
2736 */
2737 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2738 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2739 }
2740 }
2741
2742 /*
2743 * ixgbe_init_unicst - Initialize the unicast addresses.
2744 */
2745 static void
2746 ixgbe_init_unicst(ixgbe_t *ixgbe)
2747 {
2882 * and save them in the hardware registers.
2883 */
2884 static void
2885 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2886 {
2887 uint8_t *mc_addr_list;
2888 uint32_t mc_addr_count;
2889 struct ixgbe_hw *hw = &ixgbe->hw;
2890
2891 ASSERT(mutex_owned(&ixgbe->gen_lock));
2892
2893 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2894
2895 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2896 mc_addr_count = ixgbe->mcast_count;
2897
2898 /*
2899 * Update the multicast addresses to the MTA registers
2900 */
2901 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2902 ixgbe_mc_table_itr, TRUE);
2903 }
2904
2905 /*
2906 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2907 *
2908 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2909 * Different chipsets may have different allowed configuration of vmdq and rss.
2910 */
2911 static void
2912 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2913 {
2914 struct ixgbe_hw *hw = &ixgbe->hw;
2915 uint32_t ring_per_group;
2916
2917 switch (hw->mac.type) {
2918 case ixgbe_mac_82598EB:
2919 /*
2920 * 82598 supports the following combination:
2921 * vmdq no. x rss no.
2922 * [5..16] x 1
2923 * [1..4] x [1..16]
2924 * However, 8 rss queues per pool (vmdq) are sufficient
2925 * for most cases.
2926 */
2927 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2928 if (ixgbe->num_rx_groups > 4) {
2929 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2930 } else {
2931 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2932 min(8, ring_per_group);
2933 }
2934
2935 break;
2936
2937 case ixgbe_mac_82599EB:
2938 case ixgbe_mac_X540:
2939 /*
2940 * 82599 supports the following combination:
2941 * vmdq no. x rss no.
2942 * [33..64] x [1..2]
2943 * [2..32] x [1..4]
2944 * 1 x [1..16]
2945 * However, 8 rss queues per pool (vmdq) are sufficient
2946 * for most cases.
2947 *
2948 * For now, treat X540 like the 82599.
2949 */
2950 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2951 if (ixgbe->num_rx_groups == 1) {
2952 ixgbe->num_rx_rings = min(8, ring_per_group);
2953 } else if (ixgbe->num_rx_groups <= 32) {
2954 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2955 min(4, ring_per_group);
2956 } else if (ixgbe->num_rx_groups <= 64) {
2957 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2958 min(2, ring_per_group);
2959 }
2960 break;
2961
2962 default:
2963 break;
2964 }
2965
2966 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2967
2968 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3091 * 1 = force interrupt type MSI-X
3092 * 2 = force interrupt type MSI
3093 * 3 = force interrupt type Legacy
3094 */
3095 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3096 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3097
3098 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3099 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3100 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3101 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3102 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3103 0, 1, DEFAULT_LSO_ENABLE);
3104 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3105 0, 1, DEFAULT_LRO_ENABLE);
3106 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3107 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3108 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3109 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3110
3111 /* Head Write Back not recommended for 82599 and X540 */
3112 if (hw->mac.type >= ixgbe_mac_82599EB) {
3113 ixgbe->tx_head_wb_enable = B_FALSE;
3114 }
3115
3116 /*
3117 * ixgbe LSO needs tx h/w checksum support.
3118 * LSO will be disabled if tx h/w checksum is not
3119 * enabled.
3120 */
3121 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3122 ixgbe->lso_enable = B_FALSE;
3123 }
3124
3125 /*
3126 * ixgbe LRO needs rx h/w checksum support.
3127 * LRO will be disabled if rx h/w checksum is not
3128 * enabled.
3129 */
3130 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3131 ixgbe->lro_enable = B_FALSE;
3132 }
3133
3134 /*
3135 * ixgbe LRO is only supported on the 82599 and X540 for now
3136 */
3137 if (hw->mac.type < ixgbe_mac_82599EB) {
3138 ixgbe->lro_enable = B_FALSE;
3139 }
3140 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3141 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3142 DEFAULT_TX_COPY_THRESHOLD);
3143 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3144 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3145 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3146 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3147 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3148 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3149 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3150 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3151 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3152
3153 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3154 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3155 DEFAULT_RX_COPY_THRESHOLD);
3156 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3157 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3158 DEFAULT_RX_LIMIT_PER_INTR);
3159
3160 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3161 ixgbe->capab->min_intr_throttle,
3162 ixgbe->capab->max_intr_throttle,
3163 ixgbe->capab->def_intr_throttle);
3164 /*
3165 * 82599 and X540 require that the interrupt throttling rate
3166 * be a multiple of 8. This is enforced by the register
3167 * definition.
3168 */
3169 if (hw->mac.type >= ixgbe_mac_82599EB)
3170 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3171 }
3172
3173 static void
3174 ixgbe_init_params(ixgbe_t *ixgbe)
3175 {
3176 ixgbe->param_en_10000fdx_cap = 1;
3177 ixgbe->param_en_1000fdx_cap = 1;
3178 ixgbe->param_en_100fdx_cap = 1;
3179 ixgbe->param_adv_10000fdx_cap = 1;
3180 ixgbe->param_adv_1000fdx_cap = 1;
3181 ixgbe->param_adv_100fdx_cap = 1;
3182
3183 ixgbe->param_pause_cap = 1;
3184 ixgbe->param_asym_pause_cap = 1;
3185 ixgbe->param_rem_fault = 0;
3186
3187 ixgbe->param_adv_autoneg_cap = 1;
3188 ixgbe->param_adv_pause_cap = 1;
3189 ixgbe->param_adv_asym_pause_cap = 1;
3275 /*
3276 * ixgbe_driver_link_check - Link status processing.
3277 *
3278 * This function can be called in both kernel context and interrupt context
3279 */
3280 static void
3281 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3282 {
3283 struct ixgbe_hw *hw = &ixgbe->hw;
3284 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3285 boolean_t link_up = B_FALSE;
3286 boolean_t link_changed = B_FALSE;
3287
3288 ASSERT(mutex_owned(&ixgbe->gen_lock));
3289
3290 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3291 if (link_up) {
3292 ixgbe->link_check_complete = B_TRUE;
3293
3294 /* Link is up, enable flow control settings */
3295 (void) ixgbe_fc_enable(hw);
3296
3297 /*
3298 * The Link is up, check whether it was marked as down earlier
3299 */
3300 if (ixgbe->link_state != LINK_STATE_UP) {
3301 switch (speed) {
3302 case IXGBE_LINK_SPEED_10GB_FULL:
3303 ixgbe->link_speed = SPEED_10GB;
3304 break;
3305 case IXGBE_LINK_SPEED_1GB_FULL:
3306 ixgbe->link_speed = SPEED_1GB;
3307 break;
3308 case IXGBE_LINK_SPEED_100_FULL:
3309 ixgbe->link_speed = SPEED_100;
3310 }
3311 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3312 ixgbe->link_state = LINK_STATE_UP;
3313 link_changed = B_TRUE;
3314 }
3315 } else {
3794 eiac = 0;
3795
3796 /*
3797 * General purpose interrupt enable.
3798 * For 82599, extended interrupt automask is enabled
3799 * only in MSI or MSI-X mode.
3800 */
3801 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3802 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3803 gpie |= IXGBE_GPIE_EIAME;
3804 }
3805 }
3806
3807 /* Enable specific "other" interrupt types */
3808 switch (hw->mac.type) {
3809 case ixgbe_mac_82598EB:
3810 gpie |= ixgbe->capab->other_gpie;
3811 break;
3812
3813 case ixgbe_mac_82599EB:
3814 case ixgbe_mac_X540:
3815 gpie |= ixgbe->capab->other_gpie;
3816
3817 /* Enable RSC Delay 8us when LRO enabled */
3818 if (ixgbe->lro_enable) {
3819 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3820 }
3821 break;
3822
3823 default:
3824 break;
3825 }
3826
3827 /* write to interrupt control registers */
3828 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3829 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3830 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3831 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3832 IXGBE_WRITE_FLUSH(hw);
3833 }
3834
3988 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3989 &atlas);
3990 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3991 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3992 atlas);
3993
3994 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3995 &atlas);
3996 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3997 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3998 atlas);
3999
4000 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4001 &atlas);
4002 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4003 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4004 atlas);
4005 break;
4006
4007 case ixgbe_mac_82599EB:
4008 case ixgbe_mac_X540:
4009 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4010 reg |= (IXGBE_AUTOC_FLU |
4011 IXGBE_AUTOC_10G_KX4);
4012 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4013
4014 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4015 B_FALSE, B_TRUE);
4016 break;
4017
4018 default:
4019 break;
4020 }
4021 }
4022
4023 #pragma inline(ixgbe_intr_rx_work)
4024 /*
4025 * ixgbe_intr_rx_work - RX processing of ISR.
4026 */
4027 static void
4028 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4207 * Recycle the tx descriptors
4208 */
4209 tx_ring = &ixgbe->tx_rings[0];
4210 tx_ring->tx_recycle(tx_ring);
4211
4212 /*
4213 * Schedule the re-transmit
4214 */
4215 tx_reschedule = (tx_ring->reschedule &&
4216 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4217 }
4218
4219 /* any interrupt type other than tx/rx */
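		/*
		 * The "other" causes are masked while they are being
		 * serviced: the 82598 clears them from the driver's
		 * EIMS shadow, while the 82599/X540 disable them
		 * through EIMC.
		 */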
4220 if (eicr & ixgbe->capab->other_intr) {
4221 switch (hw->mac.type) {
4222 case ixgbe_mac_82598EB:
4223 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4224 break;
4225
4226 case ixgbe_mac_82599EB:
4227 case ixgbe_mac_X540:
4228 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4229 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4230 break;
4231
4232 default:
4233 break;
4234 }
4235 ixgbe_intr_other_work(ixgbe, eicr);
4236 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4237 }
4238
4239 mutex_exit(&ixgbe->gen_lock);
4240
4241 result = DDI_INTR_CLAIMED;
4242 } else {
4243 mutex_exit(&ixgbe->gen_lock);
4244
4245 /*
4246 * No interrupt cause bits set: don't claim this interrupt.
4247 */
4301 if (eicr & 0x1) {
4302 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4303 }
4304
4305 /*
4306 * For MSI interrupts, tx rings[0] will use RTxQ[1].
4307 */
4308 if (eicr & 0x2) {
4309 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4310 }
4311
4312 /* any interrupt type other than tx/rx */
4313 if (eicr & ixgbe->capab->other_intr) {
4314 mutex_enter(&ixgbe->gen_lock);
4315 switch (hw->mac.type) {
4316 case ixgbe_mac_82598EB:
4317 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4318 break;
4319
4320 case ixgbe_mac_82599EB:
4321 case ixgbe_mac_X540:
4322 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4323 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4324 break;
4325
4326 default:
4327 break;
4328 }
4329 ixgbe_intr_other_work(ixgbe, eicr);
4330 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4331 mutex_exit(&ixgbe->gen_lock);
4332 }
4333
4334 /* re-enable the interrupts which were automasked */
4335 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4336
4337 return (DDI_INTR_CLAIMED);
4338 }
4339
4340 /*
4341 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4381 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4382 DDI_FM_OK) {
4383 ddi_fm_service_impact(ixgbe->dip,
4384 DDI_SERVICE_DEGRADED);
4385 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4386 return (DDI_INTR_CLAIMED);
4387 }
4388
4389 /*
4390 * Check "other" cause bits: any interrupt type other than tx/rx
4391 */
4392 if (eicr & ixgbe->capab->other_intr) {
4393 mutex_enter(&ixgbe->gen_lock);
4394 switch (hw->mac.type) {
4395 case ixgbe_mac_82598EB:
4396 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4397 ixgbe_intr_other_work(ixgbe, eicr);
4398 break;
4399
4400 case ixgbe_mac_82599EB:
4401 case ixgbe_mac_X540:
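/*
 * Keep the queue-interrupt bits set in the EIMS shadow so the
 * re-enable write below does not drop them.
 */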
4402 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4403 ixgbe_intr_other_work(ixgbe, eicr);
4404 break;
4405
4406 default:
4407 break;
4408 }
4409 mutex_exit(&ixgbe->gen_lock);
4410 }
4411
4412 /* re-enable the interrupts which were automasked */
4413 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4414 }
4415
4416 return (DDI_INTR_CLAIMED);
4417 }
4418
4419 /*
4420 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4421 *
4782 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4783 int8_t cause)
4784 {
4785 struct ixgbe_hw *hw = &ixgbe->hw;
4786 u32 ivar, index;
4787
4788 switch (hw->mac.type) {
4789 case ixgbe_mac_82598EB:
4790 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4791 if (cause == -1) {
4792 cause = 0;
4793 }
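/*
 * Each 32-bit IVAR register holds four 8-bit vector entries; the
 * 82598 maps rx queue n to entry n and tx queue n to entry 64 + n.
 * Worked example: tx queue 5 (cause 1) yields register index
 * ((1 * 64) + 5) >> 2 = 17 and byte lane 5 & 0x3 = 1 within
 * IVAR(17).
 */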
4794 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4795 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4796 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4797 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4798 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4799 break;
4800
4801 case ixgbe_mac_82599EB:
4802 case ixgbe_mac_X540:
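/*
 * 82599/X540 layout: IVAR(n) holds the entries for queues 2n and
 * 2n + 1, with the rx cause in byte 0/2 and the tx cause in byte
 * 1/3, hence the shift 16 * (entry & 1) + 8 * cause.  The two
 * "other" cause entries live in IVAR_MISC instead.
 */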
4803 if (cause == -1) {
4804 /* other causes */
4805 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4806 index = (intr_alloc_entry & 1) * 8;
4807 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4808 ivar &= ~(0xFF << index);
4809 ivar |= (msix_vector << index);
4810 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4811 } else {
4812 /* tx or rx causes */
4813 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4814 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4815 ivar = IXGBE_READ_REG(hw,
4816 IXGBE_IVAR(intr_alloc_entry >> 1));
4817 ivar &= ~(0xFF << index);
4818 ivar |= (msix_vector << index);
4819 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4820 ivar);
4821 }
4822 break;
4836 */
4837 static void
4838 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4839 {
4840 struct ixgbe_hw *hw = &ixgbe->hw;
4841 u32 ivar, index;
4842
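/*
 * Same IVAR addressing as ixgbe_setup_ivar(); only the per-entry
 * IXGBE_IVAR_ALLOC_VAL (enable) bit is set here, leaving the
 * programmed vector number untouched.
 */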
4843 switch (hw->mac.type) {
4844 case ixgbe_mac_82598EB:
4845 if (cause == -1) {
4846 cause = 0;
4847 }
4848 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4849 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4850 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4851 (intr_alloc_entry & 0x3)));
4852 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4853 break;
4854
4855 case ixgbe_mac_82599EB:
4856 case ixgbe_mac_X540:
4857 if (cause == -1) {
4858 /* other causes */
4859 index = (intr_alloc_entry & 1) * 8;
4860 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4861 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4862 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4863 } else {
4864 /* tx or rx causes */
4865 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4866 ivar = IXGBE_READ_REG(hw,
4867 IXGBE_IVAR(intr_alloc_entry >> 1));
4868 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4869 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4870 ivar);
4871 }
4872 break;
4873
4874 default:
4875 break;
4876 }
4886 */
4887 static void
4888 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4889 {
4890 struct ixgbe_hw *hw = &ixgbe->hw;
4891 u32 ivar, index;
4892
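/*
 * Mirror image of ixgbe_enable_ivar(): clear the per-entry
 * IXGBE_IVAR_ALLOC_VAL bit without disturbing the vector number.
 */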
4893 switch (hw->mac.type) {
4894 case ixgbe_mac_82598EB:
4895 if (cause == -1) {
4896 cause = 0;
4897 }
4898 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4899 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4900 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4901 (intr_alloc_entry & 0x3)));
4902 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4903 break;
4904
4905 case ixgbe_mac_82599EB:
4906 case ixgbe_mac_X540:
4907 if (cause == -1) {
4908 /* other causes */
4909 index = (intr_alloc_entry & 1) * 8;
4910 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4911 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4912 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4913 } else {
4914 /* tx or rx causes */
4915 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4916 ivar = IXGBE_READ_REG(hw,
4917 IXGBE_IVAR(intr_alloc_entry >> 1));
4918 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4919 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4920 ivar);
4921 }
4922 break;
4923
4924 default:
4925 break;
4926 }
4929 /*
4930 * Convert the driver-maintained rx ring index to the corresponding
4931 * rx ring index in hardware.
4932 */
4933 static uint32_t
4934 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4935 {
4936
4937 struct ixgbe_hw *hw = &ixgbe->hw;
4938 uint32_t rx_ring_per_group, hw_rx_index;
4939
4940 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4941 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4942 return (sw_rx_index);
4943 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4944 switch (hw->mac.type) {
4945 case ixgbe_mac_82598EB:
4946 return (sw_rx_index);
4947
4948 case ixgbe_mac_82599EB:
4949 case ixgbe_mac_X540:
4950 return (sw_rx_index * 2);
4951
4952 default:
4953 break;
4954 }
4955 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4956 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4957
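/*
 * Rings are dense in software but strided per group in hardware:
 * 16 queues per pool on 82598, and 2 or 4 per pool on 82599/X540
 * depending on the pool count.  Worked example (82599, 32 groups
 * of 2 rings): sw ring 3 is ring 1 of group 1, so the hw index is
 * (3 / 2) * 4 + (3 % 2) = 5.
 */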
4958 switch (hw->mac.type) {
4959 case ixgbe_mac_82598EB:
4960 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4961 16 + (sw_rx_index % rx_ring_per_group);
4962 return (hw_rx_index);
4963
4964 case ixgbe_mac_82599EB:
4965 case ixgbe_mac_X540:
4966 if (ixgbe->num_rx_groups > 32) {
4967 hw_rx_index = (sw_rx_index /
4968 rx_ring_per_group) * 2 +
4969 (sw_rx_index % rx_ring_per_group);
4970 } else {
4971 hw_rx_index = (sw_rx_index /
4972 rx_ring_per_group) * 4 +
4973 (sw_rx_index % rx_ring_per_group);
4974 }
4975 return (hw_rx_index);
4976
4977 default:
4978 break;
4979 }
4980 }
4981
4982 /*
4983 * Should never be reached. Return sw_rx_index to keep the compiler happy.
4984 */
4985 return (sw_rx_index);
5050 */
5051 static void
5052 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5053 {
5054 struct ixgbe_hw *hw = &ixgbe->hw;
5055 ixgbe_intr_vector_t *vect; /* vector bitmap */
5056 int r_idx; /* ring index */
5057 int v_idx; /* vector index */
5058 uint32_t hw_index;
5059
5060 /*
5061 * Clear any previous entries
5062 */
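/*
 * The 82598 exposes 25 IVAR registers (four 8-bit entries each);
 * 82599/X540 expose 64, plus the separate IVAR_MISC register for
 * the two "other" causes.
 */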
5063 switch (hw->mac.type) {
5064 case ixgbe_mac_82598EB:
5065 for (v_idx = 0; v_idx < 25; v_idx++)
5066 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5067 break;
5068
5069 case ixgbe_mac_82599EB:
5070 case ixgbe_mac_X540:
5071 for (v_idx = 0; v_idx < 64; v_idx++)
5072 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5073 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5074 break;
5075
5076 default:
5077 break;
5078 }
5079
5080 /*
5081 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
5082 * tx rings[0] will use RTxQ[1].
5083 */
5084 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5085 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5086 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
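/*
 * ixgbe_intr_legacy() and ixgbe_intr_msi() depend on this fixed
 * mapping when they test EICR bit 0x1 (rx ring 0) and bit 0x2
 * (tx ring 0).
 */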
5087 return;
5088 }
5089
5090 /*
|