8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 */
29
30 #include "ixgbe_sw.h"
31
32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 static char ixgbe_version[] = "ixgbe 1.1.7";
34
35 /*
36 * Local function prototypes
37 */
38 static int ixgbe_register_mac(ixgbe_t *);
39 static int ixgbe_identify_hardware(ixgbe_t *);
40 static int ixgbe_regs_map(ixgbe_t *);
41 static void ixgbe_init_properties(ixgbe_t *);
42 static int ixgbe_init_driver_settings(ixgbe_t *);
43 static void ixgbe_init_locks(ixgbe_t *);
44 static void ixgbe_destroy_locks(ixgbe_t *);
45 static int ixgbe_init(ixgbe_t *);
46 static int ixgbe_chip_start(ixgbe_t *);
47 static void ixgbe_chip_stop(ixgbe_t *);
274 0xFF8, /* maximum interrupt throttle rate */
275 0, /* minimum interrupt throttle rate */
276 200, /* default interrupt throttle rate */
277 64, /* maximum total msix vectors */
278 16, /* maximum number of ring vectors */
279 2, /* maximum number of other vectors */
280 (IXGBE_EICR_LSC
281 | IXGBE_EICR_GPI_SDP1
282 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283
284 (IXGBE_SDP1_GPIEN
285 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286
287 (IXGBE_FLAG_DCA_CAPABLE
288 | IXGBE_FLAG_RSS_CAPABLE
289 | IXGBE_FLAG_VMDQ_CAPABLE
290 | IXGBE_FLAG_RSC_CAPABLE
291 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 };
293
294 /*
295 * Module Initialization Functions.
296 */
297
298 int
299 _init(void)
300 {
301 int status;
302
303 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304
305 status = mod_install(&ixgbe_modlinkage);
306
307 if (status != DDI_SUCCESS) {
308 mac_fini_ops(&ixgbe_dev_ops);
309 }
310
311 return (status);
312 }
313
850 ixgbe->capab = &ixgbe_82598eb_cap;
851
852 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 }
857 break;
858
859 case ixgbe_mac_82599EB:
860 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 ixgbe->capab = &ixgbe_82599eb_cap;
862
863 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 }
868 break;
869
870 default:
871 IXGBE_DEBUGLOG_1(ixgbe,
872 "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 hw->mac.type);
874 return (IXGBE_FAILURE);
875 }
876
877 return (IXGBE_SUCCESS);
878 }
879
880 /*
881 * ixgbe_regs_map - Map the device registers.
882 *
883 */
884 static int
885 ixgbe_regs_map(ixgbe_t *ixgbe)
886 {
887 dev_info_t *devinfo = ixgbe->dip;
888 struct ixgbe_hw *hw = &ixgbe->hw;
889 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1183 */
1184 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 /*
1186 * Some PCI-E parts fail the first check due to
1187 * the link being in sleep state. Call it again,
1188 * if it fails a second time it's a real issue.
1189 */
1190 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 ixgbe_error(ixgbe,
1192 "Invalid NVM checksum. Please contact "
1193 "the vendor to update the NVM.");
1194 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 goto init_fail;
1196 }
1197 }
1198
1199 /*
1200 * Setup default flow control thresholds - enable/disable
1201 * & flow control type is controlled by ixgbe.conf
1202 */
1203 hw->fc.high_water = DEFAULT_FCRTH;
1204 hw->fc.low_water = DEFAULT_FCRTL;
1205 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 hw->fc.send_xon = B_TRUE;
1207
1208 /*
1209 * Initialize link settings
1210 */
1211 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212
1213 /*
1214 * Initialize the chipset hardware
1215 */
1216 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 goto init_fail;
1219 }
1220
1221 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 goto init_fail;
1223 }
1224
2079 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080
2081 rx_data->rbd_next = 0;
2082 rx_data->lro_first = 0;
2083
2084 /*
2085 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 * PTHRESH=32 descriptors (half the internal cache)
2087 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 * WTHRESH defaults to 1 (writeback each descriptor)
2089 */
2090 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092
2093 /* Not a valid value for 82599 */
2094 if (hw->mac.type < ixgbe_mac_82599EB) {
2095 reg_val |= 0x0020; /* pthresh */
2096 }
2097 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098
2099 if (hw->mac.type == ixgbe_mac_82599EB) {
2100 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 }
2104
2105 /*
2106 * Setup the Split and Replication Receive Control Register.
2107 * Set the rx buffer size and the advanced descriptor type.
2108 */
2109 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 }
2114
2115 static void
2116 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 {
2118 ixgbe_rx_ring_t *rx_ring;
2119 struct ixgbe_hw *hw = &ixgbe->hw;
2316 */
2317 tx_ring->tbd_head_wb = (uint32_t *)
2318 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 *tx_ring->tbd_head_wb = 0;
2320
2321 buf_low = (uint32_t)
2322 (tx_ring->tbd_area.dma_address + size);
2323 buf_high = (uint32_t)
2324 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325
2326 /* Set the head write-back enable bit */
2327 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328
2329 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331
2332 /*
2333 * Turn off relaxed ordering for head write back or it will
2334 * cause problems with the tx recycling
2335 */
2336 reg_val = IXGBE_READ_REG(hw,
2337 IXGBE_DCA_TXCTRL(tx_ring->index));
2338 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 IXGBE_WRITE_REG(hw,
2340 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2341 } else {
2342 tx_ring->tbd_head_wb = NULL;
2343 }
2344
2345 tx_ring->tbd_head = 0;
2346 tx_ring->tbd_tail = 0;
2347 tx_ring->tbd_free = tx_ring->ring_size;
2348
2349 if (ixgbe->tx_ring_init == B_TRUE) {
2350 tx_ring->tcb_head = 0;
2351 tx_ring->tcb_tail = 0;
2352 tx_ring->tcb_free = tx_ring->free_list_size;
2353 }
2354
2355 /*
2356 * Initialize the s/w context structure
2357 */
2358 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 }
2360
2361 static void
2362 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 {
2369
2370 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 tx_ring = &ixgbe->tx_rings[i];
2372 ixgbe_setup_tx_ring(tx_ring);
2373 }
2374
2375 /*
2376 * Setup the per-ring statistics mapping.
2377 */
2378 ring_mapping = 0;
2379 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 if ((i & 0x3) == 0x3) {
2382 switch (hw->mac.type) {
2383 case ixgbe_mac_82598EB:
2384 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 ring_mapping);
2386 break;
2387
2388 case ixgbe_mac_82599EB:
2389 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 ring_mapping);
2391 break;
2392
2393 default:
2394 break;
2395 }
2396
2397 ring_mapping = 0;
2398 }
2399 }
2400 if (i & 0x3) {
2401 switch (hw->mac.type) {
2402 case ixgbe_mac_82598EB:
2403 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 break;
2405
2406 case ixgbe_mac_82599EB:
2407 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 break;
2409
2410 default:
2411 break;
2412 }
2413 }
2414
2415 /*
2416 * Enable CRC appending and TX padding (for short tx frames)
2417 */
2418 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421
2422 /*
2423 * enable DMA for 82599 parts
2424 */
2425 if (hw->mac.type == ixgbe_mac_82599EB) {
2426 /* DMATXCTL.TE must be set after all Tx config is complete */
2427 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 reg_val |= IXGBE_DMATXCTL_TE;
2429 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2430 }
2431
2432 /*
2433 * Enabling tx queues ..
2434 * For 82599 must be done after DMATXCTL.TE is set
2435 */
2436 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 tx_ring = &ixgbe->tx_rings[i];
2438 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 }
2442 }
2443
2444 /*
2445 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 */
2447 static void
2448 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 {
2509 {
2510 struct ixgbe_hw *hw = &ixgbe->hw;
2511 uint32_t vmdctl, i, vtctl;
2512
2513 /*
2514 * Setup the VMDq Control register, enable VMDq based on
2515 * packet destination MAC address:
2516 */
2517 switch (hw->mac.type) {
2518 case ixgbe_mac_82598EB:
2519 /*
2520 * VMDq Enable = 1;
2521 * VMDq Filter = 0; MAC filtering
2522 * Default VMDq output index = 0;
2523 */
2524 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 break;
2527
2528 case ixgbe_mac_82599EB:
2529 /*
2530 * Enable VMDq-only.
2531 */
2532 vmdctl = IXGBE_MRQC_VMDQEN;
2533 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534
2535 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 }
2539
2540 /*
2541 * Enable Virtualization and Replication.
2542 */
2543 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545
2546 /*
2547 * Enable receiving packets to all VFs
2548 */
2602 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611
2612 /*
2613 * Enable and Setup VMDq
2614 * VMDq Filter = 0; MAC filtering
2615 * Default VMDq output index = 0;
2616 */
2617 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 break;
2620
2621 case ixgbe_mac_82599EB:
2622 /*
2623 * Enable RSS & Setup RSS Hash functions
2624 */
2625 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634
2635 /*
2636 * Enable VMDq+RSS.
2637 */
2638 if (ixgbe->num_rx_groups > 32) {
2639 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 } else {
2641 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2647 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 }
2650 break;
2651
2652 default:
2653 break;
2654
2655 }
2656
2657 /*
2658 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 * It is an adapter hardware limitation that Packet Checksum is
2660 * mutually exclusive with RSS.
2661 */
2662 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666
2667 if (hw->mac.type == ixgbe_mac_82599EB) {
2668 /*
2669 * Enable Virtualization and Replication.
2670 */
2671 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673
2674 /*
2675 * Enable receiving packets to all VFs
2676 */
2677 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 }
2680 }
2681
2682 /*
2683 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 */
2685 static void
2686 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 {
2822 * and save them in the hardware registers.
2823 */
2824 static void
2825 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 {
2827 uint8_t *mc_addr_list;
2828 uint32_t mc_addr_count;
2829 struct ixgbe_hw *hw = &ixgbe->hw;
2830
2831 ASSERT(mutex_owned(&ixgbe->gen_lock));
2832
2833 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834
2835 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 mc_addr_count = ixgbe->mcast_count;
2837
2838 /*
2839 * Update the multicast addresses to the MTA registers
2840 */
2841 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 ixgbe_mc_table_itr);
2843 }
2844
2845 /*
2846 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 *
2848 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849 * Different chipsets may have different allowed configuration of vmdq and rss.
2850 */
2851 static void
2852 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 {
2854 struct ixgbe_hw *hw = &ixgbe->hw;
2855 uint32_t ring_per_group;
2856
2857 switch (hw->mac.type) {
2858 case ixgbe_mac_82598EB:
2859 /*
2860 * 82598 supports the following combination:
2861 * vmdq no. x rss no.
2862 * [5..16] x 1
2863 * [1..4] x [1..16]
2864 * However 8 rss queue per pool (vmdq) is sufficient for
2865 * most cases.
2866 */
2867 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 if (ixgbe->num_rx_groups > 4) {
2869 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 } else {
2871 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 min(8, ring_per_group);
2873 }
2874
2875 break;
2876
2877 case ixgbe_mac_82599EB:
2878 /*
2879 * 82599 supports the following combination:
2880 * vmdq no. x rss no.
2881 * [33..64] x [1..2]
2882 * [2..32] x [1..4]
2883 * 1 x [1..16]
2884 * However 8 rss queue per pool (vmdq) is sufficient for
2885 * most cases.
2886 */
2887 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 if (ixgbe->num_rx_groups == 1) {
2889 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 } else if (ixgbe->num_rx_groups <= 32) {
2891 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 min(4, ring_per_group);
2893 } else if (ixgbe->num_rx_groups <= 64) {
2894 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 min(2, ring_per_group);
2896 }
2897 break;
2898
2899 default:
2900 break;
2901 }
2902
2903 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904
2905 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3028 * 1 = force interrupt type MSI-X
3029 * 2 = force interrupt type MSI
3030 * 3 = force interrupt type Legacy
3031 */
3032 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034
3035 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 0, 1, DEFAULT_LSO_ENABLE);
3041 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 0, 1, DEFAULT_LRO_ENABLE);
3043 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047
3048 /* Head Write Back not recommended for 82599 */
3049 if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 ixgbe->tx_head_wb_enable = B_FALSE;
3051 }
3052
3053 /*
3054 * ixgbe LSO needs the tx h/w checksum support.
3055 * LSO will be disabled if tx h/w checksum is not
3056 * enabled.
3057 */
3058 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 ixgbe->lso_enable = B_FALSE;
3060 }
3061
3062 /*
3063 * ixgbe LRO needs the rx h/w checksum support.
3064 * LRO will be disabled if rx h/w checksum is not
3065 * enabled.
3066 */
3067 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 ixgbe->lro_enable = B_FALSE;
3069 }
3070
3071 /*
3072 * LRO is currently only supported on the 82599.
3073 */
3074 if (hw->mac.type != ixgbe_mac_82599EB) {
3075 ixgbe->lro_enable = B_FALSE;
3076 }
3077 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 DEFAULT_TX_COPY_THRESHOLD);
3080 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089
3090 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 DEFAULT_RX_COPY_THRESHOLD);
3093 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 DEFAULT_RX_LIMIT_PER_INTR);
3096
3097 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 ixgbe->capab->min_intr_throttle,
3099 ixgbe->capab->max_intr_throttle,
3100 ixgbe->capab->def_intr_throttle);
3101 /*
3102 * 82599 requires that the interrupt throttling rate be
3103 * a multiple of 8. This is enforced by the register
3104 * definition.
3105 */
3106 if (hw->mac.type == ixgbe_mac_82599EB)
3107 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 }
3109
3110 static void
3111 ixgbe_init_params(ixgbe_t *ixgbe)
3112 {
3113 ixgbe->param_en_10000fdx_cap = 1;
3114 ixgbe->param_en_1000fdx_cap = 1;
3115 ixgbe->param_en_100fdx_cap = 1;
3116 ixgbe->param_adv_10000fdx_cap = 1;
3117 ixgbe->param_adv_1000fdx_cap = 1;
3118 ixgbe->param_adv_100fdx_cap = 1;
3119
3120 ixgbe->param_pause_cap = 1;
3121 ixgbe->param_asym_pause_cap = 1;
3122 ixgbe->param_rem_fault = 0;
3123
3124 ixgbe->param_adv_autoneg_cap = 1;
3125 ixgbe->param_adv_pause_cap = 1;
3126 ixgbe->param_adv_asym_pause_cap = 1;
3212 /*
3213 * ixgbe_driver_link_check - Link status processing.
3214 *
3215 * This function can be called in both kernel context and interrupt context
3216 */
3217 static void
3218 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 {
3220 struct ixgbe_hw *hw = &ixgbe->hw;
3221 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 boolean_t link_up = B_FALSE;
3223 boolean_t link_changed = B_FALSE;
3224
3225 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226
3227 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 if (link_up) {
3229 ixgbe->link_check_complete = B_TRUE;
3230
3231 /* Link is up, enable flow control settings */
3232 (void) ixgbe_fc_enable(hw, 0);
3233
3234 /*
3235 * The Link is up, check whether it was marked as down earlier
3236 */
3237 if (ixgbe->link_state != LINK_STATE_UP) {
3238 switch (speed) {
3239 case IXGBE_LINK_SPEED_10GB_FULL:
3240 ixgbe->link_speed = SPEED_10GB;
3241 break;
3242 case IXGBE_LINK_SPEED_1GB_FULL:
3243 ixgbe->link_speed = SPEED_1GB;
3244 break;
3245 case IXGBE_LINK_SPEED_100_FULL:
3246 ixgbe->link_speed = SPEED_100;
3247 }
3248 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 ixgbe->link_state = LINK_STATE_UP;
3250 link_changed = B_TRUE;
3251 }
3252 } else {
3731 eiac = 0;
3732
3733 /*
3734 * General purpose interrupt enable.
3735 * For 82599, extended interrupt automask enable
3736 * only in MSI or MSI-X mode
3737 */
3738 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 gpie |= IXGBE_GPIE_EIAME;
3741 }
3742 }
3743
3744 /* Enable specific "other" interrupt types */
3745 switch (hw->mac.type) {
3746 case ixgbe_mac_82598EB:
3747 gpie |= ixgbe->capab->other_gpie;
3748 break;
3749
3750 case ixgbe_mac_82599EB:
3751 gpie |= ixgbe->capab->other_gpie;
3752
3753 /* Enable RSC Delay 8us when LRO enabled */
3754 if (ixgbe->lro_enable) {
3755 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 }
3757 break;
3758
3759 default:
3760 break;
3761 }
3762
3763 /* write to interrupt control registers */
3764 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 IXGBE_WRITE_FLUSH(hw);
3769 }
3770
3924 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925 &atlas);
3926 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928 atlas);
3929
3930 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931 &atlas);
3932 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934 atlas);
3935
3936 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937 &atlas);
3938 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940 atlas);
3941 break;
3942
3943 case ixgbe_mac_82599EB:
3944 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945 reg |= (IXGBE_AUTOC_FLU |
3946 IXGBE_AUTOC_10G_KX4);
3947 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948
3949 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 B_FALSE, B_TRUE);
3951 break;
3952
3953 default:
3954 break;
3955 }
3956 }
3957
3958 #pragma inline(ixgbe_intr_rx_work)
3959 /*
3960 * ixgbe_intr_rx_work - RX processing of ISR.
3961 */
3962 static void
3963 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4142 * Recycle the tx descriptors
4143 */
4144 tx_ring = &ixgbe->tx_rings[0];
4145 tx_ring->tx_recycle(tx_ring);
4146
4147 /*
4148 * Schedule the re-transmit
4149 */
4150 tx_reschedule = (tx_ring->reschedule &&
4151 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 }
4153
4154 /* any interrupt type other than tx/rx */
4155 if (eicr & ixgbe->capab->other_intr) {
4156 switch (hw->mac.type) {
4157 case ixgbe_mac_82598EB:
4158 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 break;
4160
4161 case ixgbe_mac_82599EB:
4162 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 break;
4165
4166 default:
4167 break;
4168 }
4169 ixgbe_intr_other_work(ixgbe, eicr);
4170 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 }
4172
4173 mutex_exit(&ixgbe->gen_lock);
4174
4175 result = DDI_INTR_CLAIMED;
4176 } else {
4177 mutex_exit(&ixgbe->gen_lock);
4178
4179 /*
4180 * No interrupt cause bits set: don't claim this interrupt.
4181 */
4235 if (eicr & 0x1) {
4236 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 }
4238
4239 /*
4240 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 */
4242 if (eicr & 0x2) {
4243 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 }
4245
4246 /* any interrupt type other than tx/rx */
4247 if (eicr & ixgbe->capab->other_intr) {
4248 mutex_enter(&ixgbe->gen_lock);
4249 switch (hw->mac.type) {
4250 case ixgbe_mac_82598EB:
4251 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 break;
4253
4254 case ixgbe_mac_82599EB:
4255 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 break;
4258
4259 default:
4260 break;
4261 }
4262 ixgbe_intr_other_work(ixgbe, eicr);
4263 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 mutex_exit(&ixgbe->gen_lock);
4265 }
4266
4267 /* re-enable the interrupts which were automasked */
4268 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269
4270 return (DDI_INTR_CLAIMED);
4271 }
4272
4273 /*
4274 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4314 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 DDI_FM_OK) {
4316 ddi_fm_service_impact(ixgbe->dip,
4317 DDI_SERVICE_DEGRADED);
4318 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 return (DDI_INTR_CLAIMED);
4320 }
4321
4322 /*
4323 * Check "other" cause bits: any interrupt type other than tx/rx
4324 */
4325 if (eicr & ixgbe->capab->other_intr) {
4326 mutex_enter(&ixgbe->gen_lock);
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_82598EB:
4329 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 ixgbe_intr_other_work(ixgbe, eicr);
4331 break;
4332
4333 case ixgbe_mac_82599EB:
4334 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 ixgbe_intr_other_work(ixgbe, eicr);
4336 break;
4337
4338 default:
4339 break;
4340 }
4341 mutex_exit(&ixgbe->gen_lock);
4342 }
4343
4344 /* re-enable the interrupts which were automasked */
4345 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 }
4347
4348 return (DDI_INTR_CLAIMED);
4349 }
4350
4351 /*
4352 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 *
4714 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715 int8_t cause)
4716 {
4717 struct ixgbe_hw *hw = &ixgbe->hw;
4718 u32 ivar, index;
4719
4720 switch (hw->mac.type) {
4721 case ixgbe_mac_82598EB:
4722 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 if (cause == -1) {
4724 cause = 0;
4725 }
4726 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 break;
4732
4733 case ixgbe_mac_82599EB:
4734 if (cause == -1) {
4735 /* other causes */
4736 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 index = (intr_alloc_entry & 1) * 8;
4738 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 ivar &= ~(0xFF << index);
4740 ivar |= (msix_vector << index);
4741 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 } else {
4743 /* tx or rx causes */
4744 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 ivar = IXGBE_READ_REG(hw,
4747 IXGBE_IVAR(intr_alloc_entry >> 1));
4748 ivar &= ~(0xFF << index);
4749 ivar |= (msix_vector << index);
4750 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 ivar);
4752 }
4753 break;
4767 */
4768 static void
4769 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 {
4771 struct ixgbe_hw *hw = &ixgbe->hw;
4772 u32 ivar, index;
4773
4774 switch (hw->mac.type) {
4775 case ixgbe_mac_82598EB:
4776 if (cause == -1) {
4777 cause = 0;
4778 }
4779 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 (intr_alloc_entry & 0x3)));
4783 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 break;
4785
4786 case ixgbe_mac_82599EB:
4787 if (cause == -1) {
4788 /* other causes */
4789 index = (intr_alloc_entry & 1) * 8;
4790 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 } else {
4794 /* tx or rx causes */
4795 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 ivar = IXGBE_READ_REG(hw,
4797 IXGBE_IVAR(intr_alloc_entry >> 1));
4798 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 ivar);
4801 }
4802 break;
4803
4804 default:
4805 break;
4806 }
4816 */
4817 static void
4818 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 {
4820 struct ixgbe_hw *hw = &ixgbe->hw;
4821 u32 ivar, index;
4822
4823 switch (hw->mac.type) {
4824 case ixgbe_mac_82598EB:
4825 if (cause == -1) {
4826 cause = 0;
4827 }
4828 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4831 (intr_alloc_entry & 0x3)));
4832 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 break;
4834
4835 case ixgbe_mac_82599EB:
4836 if (cause == -1) {
4837 /* other causes */
4838 index = (intr_alloc_entry & 1) * 8;
4839 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 } else {
4843 /* tx or rx causes */
4844 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 ivar = IXGBE_READ_REG(hw,
4846 IXGBE_IVAR(intr_alloc_entry >> 1));
4847 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 ivar);
4850 }
4851 break;
4852
4853 default:
4854 break;
4855 }
4858 /*
4859 * Convert the rx ring index driver maintained to the rx ring index
4860 * in h/w.
4861 */
4862 static uint32_t
4863 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 {
4865
4866 struct ixgbe_hw *hw = &ixgbe->hw;
4867 uint32_t rx_ring_per_group, hw_rx_index;
4868
4869 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 return (sw_rx_index);
4872 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 switch (hw->mac.type) {
4874 case ixgbe_mac_82598EB:
4875 return (sw_rx_index);
4876
4877 case ixgbe_mac_82599EB:
4878 return (sw_rx_index * 2);
4879
4880 default:
4881 break;
4882 }
4883 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885
4886 switch (hw->mac.type) {
4887 case ixgbe_mac_82598EB:
4888 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 16 + (sw_rx_index % rx_ring_per_group);
4890 return (hw_rx_index);
4891
4892 case ixgbe_mac_82599EB:
4893 if (ixgbe->num_rx_groups > 32) {
4894 hw_rx_index = (sw_rx_index /
4895 rx_ring_per_group) * 2 +
4896 (sw_rx_index % rx_ring_per_group);
4897 } else {
4898 hw_rx_index = (sw_rx_index /
4899 rx_ring_per_group) * 4 +
4900 (sw_rx_index % rx_ring_per_group);
4901 }
4902 return (hw_rx_index);
4903
4904 default:
4905 break;
4906 }
4907 }
4908
4909 /*
4910 * Should never reach. Just to make compiler happy.
4911 */
4912 return (sw_rx_index);
4977 */
4978 static void
4979 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 {
4981 struct ixgbe_hw *hw = &ixgbe->hw;
4982 ixgbe_intr_vector_t *vect; /* vector bitmap */
4983 int r_idx; /* ring index */
4984 int v_idx; /* vector index */
4985 uint32_t hw_index;
4986
4987 /*
4988 * Clear any previous entries
4989 */
4990 switch (hw->mac.type) {
4991 case ixgbe_mac_82598EB:
4992 for (v_idx = 0; v_idx < 25; v_idx++)
4993 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 break;
4995
4996 case ixgbe_mac_82599EB:
4997 for (v_idx = 0; v_idx < 64; v_idx++)
4998 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 break;
5001
5002 default:
5003 break;
5004 }
5005
5006 /*
5007 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008 * tx rings[0] will use RTxQ[1].
5009 */
5010 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 return;
5014 }
5015
5016 /*
|
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #include "ixgbe_sw.h"
33
34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
35 static char ixgbe_version[] = "ixgbe 1.1.7";
36
/*
 * Local function prototypes
 */
40 static int ixgbe_register_mac(ixgbe_t *);
41 static int ixgbe_identify_hardware(ixgbe_t *);
42 static int ixgbe_regs_map(ixgbe_t *);
43 static void ixgbe_init_properties(ixgbe_t *);
44 static int ixgbe_init_driver_settings(ixgbe_t *);
45 static void ixgbe_init_locks(ixgbe_t *);
46 static void ixgbe_destroy_locks(ixgbe_t *);
47 static int ixgbe_init(ixgbe_t *);
48 static int ixgbe_chip_start(ixgbe_t *);
49 static void ixgbe_chip_stop(ixgbe_t *);
276 0xFF8, /* maximum interrupt throttle rate */
277 0, /* minimum interrupt throttle rate */
278 200, /* default interrupt throttle rate */
279 64, /* maximum total msix vectors */
280 16, /* maximum number of ring vectors */
281 2, /* maximum number of other vectors */
282 (IXGBE_EICR_LSC
283 | IXGBE_EICR_GPI_SDP1
284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
285
286 (IXGBE_SDP1_GPIEN
287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
288
289 (IXGBE_FLAG_DCA_CAPABLE
290 | IXGBE_FLAG_RSS_CAPABLE
291 | IXGBE_FLAG_VMDQ_CAPABLE
292 | IXGBE_FLAG_RSC_CAPABLE
293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
294 };
295
/*
 * Capability table for X540-based adapters, installed as ixgbe->capab by
 * ixgbe_identify_hardware().  Field order must match adapter_info_t.
 * For now the X540 is treated like an 82599, so these values mirror the
 * 82599 capability table.
 */
static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	/* XXX KEBE ASKS, Do we care about X540's SDP3? */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP0
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2
	/* | IXGBE_EICR_GPI_SDP3 */), /* "other" interrupt types handled */

	(IXGBE_SDP0_GPIEN
	| IXGBE_SDP1_GPIEN
	/* | IXGBE_SDP2_GPIEN
	| IXGBE_SDP3_GPIEN */), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
	/* XXX KEBE ASKS, SFP_PLUG capable?!? */
};
332
333 /*
334 * Module Initialization Functions.
335 */
336
337 int
338 _init(void)
339 {
340 int status;
341
342 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
343
344 status = mod_install(&ixgbe_modlinkage);
345
346 if (status != DDI_SUCCESS) {
347 mac_fini_ops(&ixgbe_dev_ops);
348 }
349
350 return (status);
351 }
352
889 ixgbe->capab = &ixgbe_82598eb_cap;
890
891 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
892 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
893 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
894 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
895 }
896 break;
897
898 case ixgbe_mac_82599EB:
899 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
900 ixgbe->capab = &ixgbe_82599eb_cap;
901
902 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
903 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
904 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
905 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
906 }
907 break;
908
909 case ixgbe_mac_X540:
910 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
911 ixgbe->capab = &ixgbe_X540_cap;
912 /*
913 * For now, X540 is all set in its capab structure.
914 * As other X540 variants show up, things can change here.
915 */
916 break;
917
918 default:
919 IXGBE_DEBUGLOG_1(ixgbe,
920 "adapter not supported in ixgbe_identify_hardware(): %d\n",
921 hw->mac.type);
922 return (IXGBE_FAILURE);
923 }
924
925 return (IXGBE_SUCCESS);
926 }
927
928 /*
929 * ixgbe_regs_map - Map the device registers.
930 *
931 */
932 static int
933 ixgbe_regs_map(ixgbe_t *ixgbe)
934 {
935 dev_info_t *devinfo = ixgbe->dip;
936 struct ixgbe_hw *hw = &ixgbe->hw;
937 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1231 */
1232 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1233 /*
1234 * Some PCI-E parts fail the first check due to
1235 * the link being in sleep state. Call it again,
1236 * if it fails a second time it's a real issue.
1237 */
1238 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1239 ixgbe_error(ixgbe,
1240 "Invalid NVM checksum. Please contact "
1241 "the vendor to update the NVM.");
1242 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1243 goto init_fail;
1244 }
1245 }
1246
1247 /*
1248 * Setup default flow control thresholds - enable/disable
1249 * & flow control type is controlled by ixgbe.conf
1250 */
1251 hw->fc.high_water[0] = DEFAULT_FCRTH;
1252 hw->fc.low_water[0] = DEFAULT_FCRTL;
1253 hw->fc.pause_time = DEFAULT_FCPAUSE;
1254 hw->fc.send_xon = B_TRUE;
1255
1256 /*
1257 * Initialize link settings
1258 */
1259 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1260
1261 /*
1262 * Initialize the chipset hardware
1263 */
1264 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1265 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1266 goto init_fail;
1267 }
1268
1269 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1270 goto init_fail;
1271 }
1272
2127 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2128
2129 rx_data->rbd_next = 0;
2130 rx_data->lro_first = 0;
2131
2132 /*
2133 * Setup the Receive Descriptor Control Register (RXDCTL)
2134 * PTHRESH=32 descriptors (half the internal cache)
2135 * HTHRESH=0 descriptors (to minimize latency on fetch)
2136 * WTHRESH defaults to 1 (writeback each descriptor)
2137 */
2138 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2139 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2140
2141 /* Not a valid value for 82599 */
2142 if (hw->mac.type < ixgbe_mac_82599EB) {
2143 reg_val |= 0x0020; /* pthresh */
2144 }
2145 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2146
2147 if (hw->mac.type >= ixgbe_mac_82599EB) {
2148 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2149 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2150 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2151 }
2152
2153 /*
2154 * Setup the Split and Replication Receive Control Register.
2155 * Set the rx buffer size and the advanced descriptor type.
2156 */
2157 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2158 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2159 reg_val |= IXGBE_SRRCTL_DROP_EN;
2160 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2161 }
2162
2163 static void
2164 ixgbe_setup_rx(ixgbe_t *ixgbe)
2165 {
2166 ixgbe_rx_ring_t *rx_ring;
2167 struct ixgbe_hw *hw = &ixgbe->hw;
2364 */
2365 tx_ring->tbd_head_wb = (uint32_t *)
2366 ((uintptr_t)tx_ring->tbd_area.address + size);
2367 *tx_ring->tbd_head_wb = 0;
2368
2369 buf_low = (uint32_t)
2370 (tx_ring->tbd_area.dma_address + size);
2371 buf_high = (uint32_t)
2372 ((tx_ring->tbd_area.dma_address + size) >> 32);
2373
2374 /* Set the head write-back enable bit */
2375 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2376
2377 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2378 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2379
2380 /*
2381 * Turn off relaxed ordering for head write back or it will
2382 * cause problems with the tx recycling
2383 */
2384 #if 0
2385 /* XXX KEBE ASKS --> Should we do what FreeBSD does? */
2386 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2387 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2388 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2389 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2390 if (hw->mac.type == ixgbe_mac_82598EB) {
2391 IXGBE_WRITE_REG(hw,
2392 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2393 } else {
2394 IXGBE_WRITE_REG(hw,
2395 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2396 }
2397 #else
2398 /* XXX KEBE ASKS --> Or should we do what we've always done? */
2399 reg_val = IXGBE_READ_REG(hw,
2400 IXGBE_DCA_TXCTRL(tx_ring->index));
2401 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2402 IXGBE_WRITE_REG(hw,
2403 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2404 #endif
2405 } else {
2406 tx_ring->tbd_head_wb = NULL;
2407 #if 0
2408 /*
2409 * XXX KEBE ASKS --> Should we do what FreeBSD does and
2410 * twiddle TXCTRL_DESC_WR0_EN off anyway?
2411 */
2412 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2413 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2414 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2415 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2416 if (hw->mac.type == ixgbe_mac_82598EB) {
2417 IXGBE_WRITE_REG(hw,
2418 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2419 } else {
2420 IXGBE_WRITE_REG(hw,
2421 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2422 }
2423 #endif
2424 }
2425
2426 tx_ring->tbd_head = 0;
2427 tx_ring->tbd_tail = 0;
2428 tx_ring->tbd_free = tx_ring->ring_size;
2429
2430 if (ixgbe->tx_ring_init == B_TRUE) {
2431 tx_ring->tcb_head = 0;
2432 tx_ring->tcb_tail = 0;
2433 tx_ring->tcb_free = tx_ring->free_list_size;
2434 }
2435
2436 /*
2437 * Initialize the s/w context structure
2438 */
2439 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2440 }
2441
/*
 * ixgbe_setup_tx - Global transmit-side hardware setup.
 *
 * Initializes every tx ring, programs the per-ring statistics mapping,
 * enables CRC appending and short-frame padding, and (on 82599/X540)
 * turns on tx DMA before enabling the individual tx queues.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{

	/* Per-ring descriptor area and register setup. */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 *
	 * Four 8-bit mapping fields are packed into each 32-bit
	 * TQSMR/TQSM register, so the accumulated word is written out
	 * after every fourth ring.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
				break;

			default:
				break;
			}

			ring_mapping = 0;
		}
	}
	/* Flush any partially-filled mapping word for the last rings. */
	if (i & 0x3) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
			break;

		default:
			break;
		}
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599 and X540 parts
	 */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
		/* XXX KEBE SAYS - FreeBSD sets up MTQC. Should we? */
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2527
2528 /*
2529 * ixgbe_setup_rss - Setup receive-side scaling feature.
2530 */
2531 static void
2532 ixgbe_setup_rss(ixgbe_t *ixgbe)
2533 {
2593 {
2594 struct ixgbe_hw *hw = &ixgbe->hw;
2595 uint32_t vmdctl, i, vtctl;
2596
2597 /*
2598 * Setup the VMDq Control register, enable VMDq based on
2599 * packet destination MAC address:
2600 */
2601 switch (hw->mac.type) {
2602 case ixgbe_mac_82598EB:
2603 /*
2604 * VMDq Enable = 1;
2605 * VMDq Filter = 0; MAC filtering
2606 * Default VMDq output index = 0;
2607 */
2608 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2609 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2610 break;
2611
2612 case ixgbe_mac_82599EB:
2613 case ixgbe_mac_X540:
2614 /*
2615 * Enable VMDq-only.
2616 */
2617 vmdctl = IXGBE_MRQC_VMDQEN;
2618 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2619
2620 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2621 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2622 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2623 }
2624
2625 /*
2626 * Enable Virtualization and Replication.
2627 */
2628 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2629 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2630
2631 /*
2632 * Enable receiving packets to all VFs
2633 */
2687 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2688 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2689 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2690 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2691 IXGBE_MRQC_RSS_FIELD_IPV6 |
2692 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2693 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2694 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2695 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2696
2697 /*
2698 * Enable and Setup VMDq
2699 * VMDq Filter = 0; MAC filtering
2700 * Default VMDq output index = 0;
2701 */
2702 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2703 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2704 break;
2705
2706 case ixgbe_mac_82599EB:
2707 case ixgbe_mac_X540:
2708 /*
2709 * Enable RSS & Setup RSS Hash functions
2710 */
2711 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2712 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2713 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2714 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2715 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2716 IXGBE_MRQC_RSS_FIELD_IPV6 |
2717 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2718 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2719 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2720
2721 /*
2722 * Enable VMDq+RSS.
2723 */
2724 if (ixgbe->num_rx_groups > 32) {
2725 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2726 } else {
2727 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2733 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2734 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2735 }
2736 break;
2737
2738 default:
2739 break;
2740
2741 }
2742
2743 /*
2744 * Disable Packet Checksum to enable RSS for multiple receive queues.
2745 * It is an adapter hardware limitation that Packet Checksum is
2746 * mutually exclusive with RSS.
2747 */
2748 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2749 rxcsum |= IXGBE_RXCSUM_PCSD;
2750 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2751 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2752
2753 if (hw->mac.type >= ixgbe_mac_82599EB) {
2754 /*
2755 * Enable Virtualization and Replication.
2756 */
2757 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2758 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2759
2760 /*
2761 * Enable receiving packets to all VFs
2762 */
2763 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2764 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2765 }
2766 }
2767
2768 /*
2769 * ixgbe_init_unicst - Initialize the unicast addresses.
2770 */
2771 static void
2772 ixgbe_init_unicst(ixgbe_t *ixgbe)
2773 {
2908 * and save them in the hardware registers.
2909 */
2910 static void
2911 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2912 {
2913 uint8_t *mc_addr_list;
2914 uint32_t mc_addr_count;
2915 struct ixgbe_hw *hw = &ixgbe->hw;
2916
2917 ASSERT(mutex_owned(&ixgbe->gen_lock));
2918
2919 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2920
2921 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2922 mc_addr_count = ixgbe->mcast_count;
2923
2924 /*
2925 * Update the multicast addresses to the MTA registers
2926 */
2927 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2928 ixgbe_mc_table_itr, TRUE);
2929 }
2930
2931 /*
2932 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2933 *
2934 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2935 * Different chipsets may have different allowed configuration of vmdq and rss.
2936 */
2937 static void
2938 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2939 {
2940 struct ixgbe_hw *hw = &ixgbe->hw;
2941 uint32_t ring_per_group;
2942
2943 switch (hw->mac.type) {
2944 case ixgbe_mac_82598EB:
2945 /*
2946 * 82598 supports the following combination:
2947 * vmdq no. x rss no.
2948 * [5..16] x 1
2949 * [1..4] x [1..16]
2950 * However 8 rss queue per pool (vmdq) is sufficient for
2951 * most cases.
2952 */
2953 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2954 if (ixgbe->num_rx_groups > 4) {
2955 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2956 } else {
2957 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2958 min(8, ring_per_group);
2959 }
2960
2961 break;
2962
2963 case ixgbe_mac_82599EB:
2964 case ixgbe_mac_X540:
2965 /*
2966 * 82599 supports the following combination:
2967 * vmdq no. x rss no.
2968 * [33..64] x [1..2]
2969 * [2..32] x [1..4]
2970 * 1 x [1..16]
2971 * However 8 rss queue per pool (vmdq) is sufficient for
2972 * most cases.
2973 *
2974 * For now, treat X540 like the 82599.
2975 */
2976 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2977 if (ixgbe->num_rx_groups == 1) {
2978 ixgbe->num_rx_rings = min(8, ring_per_group);
2979 } else if (ixgbe->num_rx_groups <= 32) {
2980 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2981 min(4, ring_per_group);
2982 } else if (ixgbe->num_rx_groups <= 64) {
2983 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2984 min(2, ring_per_group);
2985 }
2986 break;
2987
2988 default:
2989 break;
2990 }
2991
2992 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2993
2994 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3117 * 1 = force interrupt type MSI-X
3118 * 2 = force interrupt type MSI
3119 * 3 = force interrupt type Legacy
3120 */
3121 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3122 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3123
3124 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3125 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3126 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3127 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3128 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3129 0, 1, DEFAULT_LSO_ENABLE);
3130 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3131 0, 1, DEFAULT_LRO_ENABLE);
3132 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3133 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3134 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3135 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3136
3137 /* Head Write Back not recommended for 82599 and X540 */
3138 if (hw->mac.type >= ixgbe_mac_82599EB) {
3139 ixgbe->tx_head_wb_enable = B_FALSE;
3140 }
3141
3142 /*
3143 * ixgbe LSO needs the tx h/w checksum support.
3144 * LSO will be disabled if tx h/w checksum is not
3145 * enabled.
3146 */
3147 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3148 ixgbe->lso_enable = B_FALSE;
3149 }
3150
3151 /*
3152 * ixgbe LRO needs the rx h/w checksum support.
3153 * LRO will be disabled if rx h/w checksum is not
3154 * enabled.
3155 */
3156 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3157 ixgbe->lro_enable = B_FALSE;
3158 }
3159
	/*
	 * ixgbe LRO is currently supported only on the 82599 and X540
	 */
3163 if (hw->mac.type < ixgbe_mac_82599EB) {
3164 ixgbe->lro_enable = B_FALSE;
3165 }
3166 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3167 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3168 DEFAULT_TX_COPY_THRESHOLD);
3169 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3170 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3171 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3172 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3173 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3174 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3175 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3176 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3177 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3178
3179 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3180 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3181 DEFAULT_RX_COPY_THRESHOLD);
3182 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3183 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3184 DEFAULT_RX_LIMIT_PER_INTR);
3185
3186 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3187 ixgbe->capab->min_intr_throttle,
3188 ixgbe->capab->max_intr_throttle,
3189 ixgbe->capab->def_intr_throttle);
	/*
	 * 82599 and X540 require that the interrupt throttling rate
	 * be a multiple of 8. This is enforced by the register
	 * definition.
	 */
3195 if (hw->mac.type >= ixgbe_mac_82599EB)
3196 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3197 }
3198
3199 static void
3200 ixgbe_init_params(ixgbe_t *ixgbe)
3201 {
3202 ixgbe->param_en_10000fdx_cap = 1;
3203 ixgbe->param_en_1000fdx_cap = 1;
3204 ixgbe->param_en_100fdx_cap = 1;
3205 ixgbe->param_adv_10000fdx_cap = 1;
3206 ixgbe->param_adv_1000fdx_cap = 1;
3207 ixgbe->param_adv_100fdx_cap = 1;
3208
3209 ixgbe->param_pause_cap = 1;
3210 ixgbe->param_asym_pause_cap = 1;
3211 ixgbe->param_rem_fault = 0;
3212
3213 ixgbe->param_adv_autoneg_cap = 1;
3214 ixgbe->param_adv_pause_cap = 1;
3215 ixgbe->param_adv_asym_pause_cap = 1;
3301 /*
3302 * ixgbe_driver_link_check - Link status processing.
3303 *
3304 * This function can be called in both kernel context and interrupt context
3305 */
3306 static void
3307 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3308 {
3309 struct ixgbe_hw *hw = &ixgbe->hw;
3310 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3311 boolean_t link_up = B_FALSE;
3312 boolean_t link_changed = B_FALSE;
3313
3314 ASSERT(mutex_owned(&ixgbe->gen_lock));
3315
3316 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3317 if (link_up) {
3318 ixgbe->link_check_complete = B_TRUE;
3319
3320 /* Link is up, enable flow control settings */
3321 (void) ixgbe_fc_enable(hw);
3322
3323 /*
3324 * The Link is up, check whether it was marked as down earlier
3325 */
3326 if (ixgbe->link_state != LINK_STATE_UP) {
3327 switch (speed) {
3328 case IXGBE_LINK_SPEED_10GB_FULL:
3329 ixgbe->link_speed = SPEED_10GB;
3330 break;
3331 case IXGBE_LINK_SPEED_1GB_FULL:
3332 ixgbe->link_speed = SPEED_1GB;
3333 break;
3334 case IXGBE_LINK_SPEED_100_FULL:
3335 ixgbe->link_speed = SPEED_100;
3336 }
3337 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3338 ixgbe->link_state = LINK_STATE_UP;
3339 link_changed = B_TRUE;
3340 }
3341 } else {
3820 eiac = 0;
3821
3822 /*
3823 * General purpose interrupt enable.
3824 * For 82599, extended interrupt automask enable
3825 * only in MSI or MSI-X mode
3826 */
3827 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3828 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3829 gpie |= IXGBE_GPIE_EIAME;
3830 }
3831 }
3832
3833 /* Enable specific "other" interrupt types */
3834 switch (hw->mac.type) {
3835 case ixgbe_mac_82598EB:
3836 gpie |= ixgbe->capab->other_gpie;
3837 break;
3838
3839 case ixgbe_mac_82599EB:
3840 case ixgbe_mac_X540:
3841 gpie |= ixgbe->capab->other_gpie;
3842
3843 /* Enable RSC Delay 8us when LRO enabled */
3844 if (ixgbe->lro_enable) {
3845 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3846 }
3847 break;
3848
3849 default:
3850 break;
3851 }
3852
3853 /* write to interrupt control registers */
3854 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3855 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3856 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3857 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3858 IXGBE_WRITE_FLUSH(hw);
3859 }
3860
4014 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4015 &atlas);
4016 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4017 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4018 atlas);
4019
4020 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4021 &atlas);
4022 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4023 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4024 atlas);
4025
4026 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4027 &atlas);
4028 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4029 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4030 atlas);
4031 break;
4032
4033 case ixgbe_mac_82599EB:
4034 case ixgbe_mac_X540:
4035 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4036 reg |= (IXGBE_AUTOC_FLU |
4037 IXGBE_AUTOC_10G_KX4);
4038 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4039
4040 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4041 B_FALSE, B_TRUE);
4042 break;
4043
4044 default:
4045 break;
4046 }
4047 }
4048
4049 #pragma inline(ixgbe_intr_rx_work)
4050 /*
4051 * ixgbe_intr_rx_work - RX processing of ISR.
4052 */
4053 static void
4054 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4233 * Recycle the tx descriptors
4234 */
4235 tx_ring = &ixgbe->tx_rings[0];
4236 tx_ring->tx_recycle(tx_ring);
4237
4238 /*
4239 * Schedule the re-transmit
4240 */
4241 tx_reschedule = (tx_ring->reschedule &&
4242 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4243 }
4244
4245 /* any interrupt type other than tx/rx */
4246 if (eicr & ixgbe->capab->other_intr) {
4247 switch (hw->mac.type) {
4248 case ixgbe_mac_82598EB:
4249 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4250 break;
4251
4252 case ixgbe_mac_82599EB:
4253 case ixgbe_mac_X540:
4254 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4255 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4256 break;
4257
4258 default:
4259 break;
4260 }
4261 ixgbe_intr_other_work(ixgbe, eicr);
4262 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4263 }
4264
4265 mutex_exit(&ixgbe->gen_lock);
4266
4267 result = DDI_INTR_CLAIMED;
4268 } else {
4269 mutex_exit(&ixgbe->gen_lock);
4270
4271 /*
4272 * No interrupt cause bits set: don't claim this interrupt.
4273 */
4327 if (eicr & 0x1) {
4328 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4329 }
4330
4331 /*
4332 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4333 */
4334 if (eicr & 0x2) {
4335 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4336 }
4337
4338 /* any interrupt type other than tx/rx */
4339 if (eicr & ixgbe->capab->other_intr) {
4340 mutex_enter(&ixgbe->gen_lock);
4341 switch (hw->mac.type) {
4342 case ixgbe_mac_82598EB:
4343 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4344 break;
4345
4346 case ixgbe_mac_82599EB:
4347 case ixgbe_mac_X540:
4348 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4349 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4350 break;
4351
4352 default:
4353 break;
4354 }
4355 ixgbe_intr_other_work(ixgbe, eicr);
4356 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4357 mutex_exit(&ixgbe->gen_lock);
4358 }
4359
4360 /* re-enable the interrupts which were automasked */
4361 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4362
4363 return (DDI_INTR_CLAIMED);
4364 }
4365
4366 /*
4367 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4407 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4408 DDI_FM_OK) {
4409 ddi_fm_service_impact(ixgbe->dip,
4410 DDI_SERVICE_DEGRADED);
4411 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4412 return (DDI_INTR_CLAIMED);
4413 }
4414
4415 /*
4416 * Check "other" cause bits: any interrupt type other than tx/rx
4417 */
4418 if (eicr & ixgbe->capab->other_intr) {
4419 mutex_enter(&ixgbe->gen_lock);
4420 switch (hw->mac.type) {
4421 case ixgbe_mac_82598EB:
4422 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4423 ixgbe_intr_other_work(ixgbe, eicr);
4424 break;
4425
4426 case ixgbe_mac_82599EB:
4427 case ixgbe_mac_X540:
4428 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4429 ixgbe_intr_other_work(ixgbe, eicr);
4430 break;
4431
4432 default:
4433 break;
4434 }
4435 mutex_exit(&ixgbe->gen_lock);
4436 }
4437
4438 /* re-enable the interrupts which were automasked */
4439 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4440 }
4441
4442 return (DDI_INTR_CLAIMED);
4443 }
4444
4445 /*
4446 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4447 *
4808 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4809 int8_t cause)
4810 {
4811 struct ixgbe_hw *hw = &ixgbe->hw;
4812 u32 ivar, index;
4813
4814 switch (hw->mac.type) {
4815 case ixgbe_mac_82598EB:
4816 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4817 if (cause == -1) {
4818 cause = 0;
4819 }
4820 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4821 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4822 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4823 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4824 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4825 break;
4826
4827 case ixgbe_mac_82599EB:
4828 case ixgbe_mac_X540:
4829 if (cause == -1) {
4830 /* other causes */
4831 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4832 index = (intr_alloc_entry & 1) * 8;
4833 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4834 ivar &= ~(0xFF << index);
4835 ivar |= (msix_vector << index);
4836 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4837 } else {
4838 /* tx or rx causes */
4839 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4840 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4841 ivar = IXGBE_READ_REG(hw,
4842 IXGBE_IVAR(intr_alloc_entry >> 1));
4843 ivar &= ~(0xFF << index);
4844 ivar |= (msix_vector << index);
4845 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4846 ivar);
4847 }
4848 break;
4862 */
4863 static void
4864 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4865 {
4866 struct ixgbe_hw *hw = &ixgbe->hw;
4867 u32 ivar, index;
4868
4869 switch (hw->mac.type) {
4870 case ixgbe_mac_82598EB:
4871 if (cause == -1) {
4872 cause = 0;
4873 }
4874 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4875 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4876 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4877 (intr_alloc_entry & 0x3)));
4878 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4879 break;
4880
4881 case ixgbe_mac_82599EB:
4882 case ixgbe_mac_X540:
4883 if (cause == -1) {
4884 /* other causes */
4885 index = (intr_alloc_entry & 1) * 8;
4886 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4887 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4888 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4889 } else {
4890 /* tx or rx causes */
4891 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4892 ivar = IXGBE_READ_REG(hw,
4893 IXGBE_IVAR(intr_alloc_entry >> 1));
4894 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4895 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4896 ivar);
4897 }
4898 break;
4899
4900 default:
4901 break;
4902 }
4912 */
4913 static void
4914 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4915 {
4916 struct ixgbe_hw *hw = &ixgbe->hw;
4917 u32 ivar, index;
4918
4919 switch (hw->mac.type) {
4920 case ixgbe_mac_82598EB:
4921 if (cause == -1) {
4922 cause = 0;
4923 }
4924 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4925 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4926 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4927 (intr_alloc_entry & 0x3)));
4928 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4929 break;
4930
4931 case ixgbe_mac_82599EB:
4932 case ixgbe_mac_X540:
4933 if (cause == -1) {
4934 /* other causes */
4935 index = (intr_alloc_entry & 1) * 8;
4936 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4937 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4938 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4939 } else {
4940 /* tx or rx causes */
4941 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4942 ivar = IXGBE_READ_REG(hw,
4943 IXGBE_IVAR(intr_alloc_entry >> 1));
4944 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4945 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4946 ivar);
4947 }
4948 break;
4949
4950 default:
4951 break;
4952 }
/*
 * ixgbe_get_hw_rx_index - Convert the rx ring index the driver maintains
 * to the corresponding rx ring (queue) index in h/w.
 *
 * In RSS-only or no-classification mode the mapping is the identity.
 * In VMDq and VMDq+RSS modes the hardware lays out each group's queues
 * at a fixed stride, so the h/w index is recomputed from the group
 * number (sw_rx_index / rings-per-group) and the ring's offset within
 * its group (sw_rx_index % rings-per-group).
 */
static uint32_t
ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
{

	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t rx_ring_per_group, hw_rx_index;

	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
		/* s/w and h/w indices are identical in these modes */
		return (sw_rx_index);
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (sw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/* pools' queues are spaced two apart */
			return (sw_rx_index * 2);

		default:
			break;
		}
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			/* 82598: 16 h/w queues per pool */
			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
			    16 + (sw_rx_index % rx_ring_per_group);
			return (hw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/*
			 * 82599/X540: 2 h/w queues per pool when more
			 * than 32 groups are configured, 4 otherwise.
			 */
			if (ixgbe->num_rx_groups > 32) {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 2 +
				    (sw_rx_index % rx_ring_per_group);
			} else {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 4 +
				    (sw_rx_index % rx_ring_per_group);
			}
			return (hw_rx_index);

		default:
			break;
		}
	}

	/*
	 * Should never reach. Just to make compiler happy.
	 */
	return (sw_rx_index);
5076 */
5077 static void
5078 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5079 {
5080 struct ixgbe_hw *hw = &ixgbe->hw;
5081 ixgbe_intr_vector_t *vect; /* vector bitmap */
5082 int r_idx; /* ring index */
5083 int v_idx; /* vector index */
5084 uint32_t hw_index;
5085
5086 /*
5087 * Clear any previous entries
5088 */
5089 switch (hw->mac.type) {
5090 case ixgbe_mac_82598EB:
5091 for (v_idx = 0; v_idx < 25; v_idx++)
5092 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5093 break;
5094
5095 case ixgbe_mac_82599EB:
5096 case ixgbe_mac_X540:
5097 for (v_idx = 0; v_idx < 64; v_idx++)
5098 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5099 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5100 break;
5101
5102 default:
5103 break;
5104 }
5105
5106 /*
5107 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5108 * tx rings[0] will use RTxQ[1].
5109 */
5110 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5111 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5112 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5113 return;
5114 }
5115
5116 /*
|