8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 */
29
30 #include "ixgbe_sw.h"
31
32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 static char ixgbe_version[] = "ixgbe 1.1.7";
34
35 /*
36 * Local function prototypes
37 */
38 static int ixgbe_register_mac(ixgbe_t *);
39 static int ixgbe_identify_hardware(ixgbe_t *);
40 static int ixgbe_regs_map(ixgbe_t *);
41 static void ixgbe_init_properties(ixgbe_t *);
42 static int ixgbe_init_driver_settings(ixgbe_t *);
43 static void ixgbe_init_locks(ixgbe_t *);
44 static void ixgbe_destroy_locks(ixgbe_t *);
45 static int ixgbe_init(ixgbe_t *);
46 static int ixgbe_chip_start(ixgbe_t *);
47 static void ixgbe_chip_stop(ixgbe_t *);
274 0xFF8, /* maximum interrupt throttle rate */
275 0, /* minimum interrupt throttle rate */
276 200, /* default interrupt throttle rate */
277 64, /* maximum total msix vectors */
278 16, /* maximum number of ring vectors */
279 2, /* maximum number of other vectors */
280 (IXGBE_EICR_LSC
281 | IXGBE_EICR_GPI_SDP1
282 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283
284 (IXGBE_SDP1_GPIEN
285 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286
287 (IXGBE_FLAG_DCA_CAPABLE
288 | IXGBE_FLAG_RSS_CAPABLE
289 | IXGBE_FLAG_VMDQ_CAPABLE
290 | IXGBE_FLAG_RSC_CAPABLE
291 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 };
293
294 /*
295 * Module Initialization Functions.
296 */
297
298 int
299 _init(void)
300 {
301 int status;
302
303 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304
305 status = mod_install(&ixgbe_modlinkage);
306
307 if (status != DDI_SUCCESS) {
308 mac_fini_ops(&ixgbe_dev_ops);
309 }
310
311 return (status);
312 }
313
850 ixgbe->capab = &ixgbe_82598eb_cap;
851
852 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 }
857 break;
858
859 case ixgbe_mac_82599EB:
860 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 ixgbe->capab = &ixgbe_82599eb_cap;
862
863 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 }
868 break;
869
870 default:
871 IXGBE_DEBUGLOG_1(ixgbe,
872 "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 hw->mac.type);
874 return (IXGBE_FAILURE);
875 }
876
877 return (IXGBE_SUCCESS);
878 }
879
880 /*
881 * ixgbe_regs_map - Map the device registers.
883 */
884 static int
885 ixgbe_regs_map(ixgbe_t *ixgbe)
886 {
887 dev_info_t *devinfo = ixgbe->dip;
888 struct ixgbe_hw *hw = &ixgbe->hw;
889 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1183 */
1184 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 /*
1186 * Some PCI-E parts fail the first check due to
1187 * the link being in sleep state.  Call it again;
1188 * if it fails a second time, it's a real issue.
1189 */
1190 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 ixgbe_error(ixgbe,
1192 "Invalid NVM checksum. Please contact "
1193 "the vendor to update the NVM.");
1194 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 goto init_fail;
1196 }
1197 }
1198
1199 /*
1200 * Set up the default flow control thresholds.  Whether flow
1201 * control is enabled, and its type, are controlled by ixgbe.conf.
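 *
 * Background (general 8259x behavior, noted here for context):
 * FCRTH and FCRTL are rx buffer occupancy thresholds in bytes; an
 * XOFF pause is sent when occupancy rises past high_water and,
 * with send_xon set, an XON when it drops back below low_water.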
1202 */
1203 hw->fc.high_water = DEFAULT_FCRTH;
1204 hw->fc.low_water = DEFAULT_FCRTL;
1205 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 hw->fc.send_xon = B_TRUE;
1207
1208 /*
1209 * Initialize link settings
1210 */
1211 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212
1213 /*
1214 * Initialize the chipset hardware
1215 */
1216 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 goto init_fail;
1219 }
1220
1221 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 goto init_fail;
1223 }
1224
2079 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080
2081 rx_data->rbd_next = 0;
2082 rx_data->lro_first = 0;
2083
2084 /*
2085 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 * PTHRESH=32 descriptors (half the internal cache)
2087 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 * WTHRESH defaults to 1 (writeback each descriptor)
2089 */
2090 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092
2093 /* Not a valid value for 82599 */
2094 if (hw->mac.type < ixgbe_mac_82599EB) {
2095 reg_val |= 0x0020; /* pthresh */
2096 }
2097 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098
2099 if (hw->mac.type == ixgbe_mac_82599EB) {
2100 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 }
2104
2105 /*
2106 * Setup the Split and Replication Receive Control Register.
2107 * Set the rx buffer size and the advanced descriptor type.
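 * Note: SRRCTL.BSIZEPKT is expressed in 1 KB units (the shared
 * code's IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2 KB rx buffer
 * is programmed as the value 2.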
2108 */
2109 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 }
2114
2115 static void
2116 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 {
2118 ixgbe_rx_ring_t *rx_ring;
2119 struct ixgbe_hw *hw = &ixgbe->hw;
2316 */
2317 tx_ring->tbd_head_wb = (uint32_t *)
2318 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 *tx_ring->tbd_head_wb = 0;
2320
2321 buf_low = (uint32_t)
2322 (tx_ring->tbd_area.dma_address + size);
2323 buf_high = (uint32_t)
2324 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325
2326 /* Set the head write-back enable bit */
2327 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328
2329 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331
2332 /*
2333 * Turn off relaxed ordering for head write-back, or it will
2334 * cause problems with tx recycling.
2335 */
2336 reg_val = IXGBE_READ_REG(hw,
2337 IXGBE_DCA_TXCTRL(tx_ring->index));
2338 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 IXGBE_WRITE_REG(hw,
2340 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2341 } else {
2342 tx_ring->tbd_head_wb = NULL;
2343 }
2344
2345 tx_ring->tbd_head = 0;
2346 tx_ring->tbd_tail = 0;
2347 tx_ring->tbd_free = tx_ring->ring_size;
2348
2349 if (ixgbe->tx_ring_init == B_TRUE) {
2350 tx_ring->tcb_head = 0;
2351 tx_ring->tcb_tail = 0;
2352 tx_ring->tcb_free = tx_ring->free_list_size;
2353 }
2354
2355 /*
2356 * Initialize the s/w context structure
2357 */
2358 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 }
2360
2361 static void
2362 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 {
2369
2370 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 tx_ring = &ixgbe->tx_rings[i];
2372 ixgbe_setup_tx_ring(tx_ring);
2373 }
2374
2375 /*
2376 * Setup the per-ring statistics mapping.
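 *
 * Layout sketch (as read from the packing below): each TQSMR/TQSM
 * register holds four 8-bit queue-to-statistics-counter fields, so
 * tx queues 0..3 map through register 0, queues 4..7 through
 * register 1, and so on.  For example, i = 5 places (5 & 0xF) in
 * byte (5 & 0x3) = 1 of TQSM(5 >> 2) = TQSM(1).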
2377 */
2378 ring_mapping = 0;
2379 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 if ((i & 0x3) == 0x3) {
2382 switch (hw->mac.type) {
2383 case ixgbe_mac_82598EB:
2384 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 ring_mapping);
2386 break;
2387
2388 case ixgbe_mac_82599EB:
2389 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 ring_mapping);
2391 break;
2392
2393 default:
2394 break;
2395 }
2396
2397 ring_mapping = 0;
2398 }
2399 }
2400 if (i & 0x3) {
2401 switch (hw->mac.type) {
2402 case ixgbe_mac_82598EB:
2403 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 break;
2405
2406 case ixgbe_mac_82599EB:
2407 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 break;
2409
2410 default:
2411 break;
2412 }
2413 }
2414
2415 /*
2416 * Enable CRC appending and TX padding (for short tx frames)
2417 */
2418 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421
2422 /*
2423 * enable DMA for 82599 parts
2424 */
2425 if (hw->mac.type == ixgbe_mac_82599EB) {
2426 /* DMATXCTL.TE must be set after all Tx config is complete */
2427 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 reg_val |= IXGBE_DMATXCTL_TE;
2429 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2430 }
2431
2432 /*
2433 * Enable the tx queues.
2434 * For 82599, this must be done after DMATXCTL.TE is set.
2435 */
2436 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 tx_ring = &ixgbe->tx_rings[i];
2438 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 }
2442 }
2443
2444 /*
2445 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 */
2447 static void
2448 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 {
2509 {
2510 struct ixgbe_hw *hw = &ixgbe->hw;
2511 uint32_t vmdctl, i, vtctl;
2512
2513 /*
2514 * Setup the VMDq Control register, enable VMDq based on
2515 * packet destination MAC address:
2516 */
2517 switch (hw->mac.type) {
2518 case ixgbe_mac_82598EB:
2519 /*
2520 * VMDq Enable = 1;
2521 * VMDq Filter = 0; MAC filtering
2522 * Default VMDq output index = 0;
2523 */
2524 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 break;
2527
2528 case ixgbe_mac_82599EB:
2529 /*
2530 * Enable VMDq-only.
2531 */
2532 vmdctl = IXGBE_MRQC_VMDQEN;
2533 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534
2535 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 }
2539
2540 /*
2541 * Enable Virtualization and Replication.
2542 */
2543 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545
2546 /*
2547 * Enable receiving packets to all VFs
2548 */
2602 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611
2612 /*
2613 * Enable and Setup VMDq
2614 * VMDq Filter = 0; MAC filtering
2615 * Default VMDq output index = 0;
2616 */
2617 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 break;
2620
2621 case ixgbe_mac_82599EB:
2622 /*
2623 * Enable RSS & Setup RSS Hash functions
2624 */
2625 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634
2635 /*
2636 * Enable VMDq+RSS.
2637 */
2638 if (ixgbe->num_rx_groups > 32) {
2639 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 } else {
2641 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2647 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 }
2650 break;
2651
2652 default:
2653 break;
2654
2655 }
2656
2657 /*
2658 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 * It is an adapter hardware limitation that Packet Checksum is
2660 * mutually exclusive with RSS.
2661 */
2662 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666
2667 if (hw->mac.type == ixgbe_mac_82599EB) {
2668 /*
2669 * Enable Virtualization and Replication.
2670 */
2671 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673
2674 /*
2675 * Enable receiving packets to all VFs
2676 */
2677 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 }
2680 }
2681
2682 /*
2683 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 */
2685 static void
2686 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 {
2822 * and save them in the hardware registers.
2823 */
2824 static void
2825 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 {
2827 uint8_t *mc_addr_list;
2828 uint32_t mc_addr_count;
2829 struct ixgbe_hw *hw = &ixgbe->hw;
2830
2831 ASSERT(mutex_owned(&ixgbe->gen_lock));
2832
2833 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834
2835 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 mc_addr_count = ixgbe->mcast_count;
2837
2838 /*
2839 * Update the multicast addresses to the MTA registers
2840 */
2841 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 ixgbe_mc_table_itr);
2843 }
2844
2845 /*
2846 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 *
2848 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2850 * Different chipsets may have different allowed configurations of vmdq and rss.
2850 */
2851 static void
2852 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 {
2854 struct ixgbe_hw *hw = &ixgbe->hw;
2855 uint32_t ring_per_group;
2856
2857 switch (hw->mac.type) {
2858 case ixgbe_mac_82598EB:
2859 /*
2860 * 82598 supports the following combination:
2861 * vmdq no. x rss no.
2862 * [5..16] x 1
2863 * [1..4] x [1..16]
2864 * However, 8 rss queues per pool (vmdq) are sufficient for
2865 * most cases.
2866 */
2867 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 if (ixgbe->num_rx_groups > 4) {
2869 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 } else {
2871 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 min(8, ring_per_group);
2873 }
2874
2875 break;
2876
2877 case ixgbe_mac_82599EB:
2878 /*
2879 * 82599 supports the following combination:
2880 * vmdq no. x rss no.
2881 * [33..64] x [1..2]
2882 * [2..32] x [1..4]
2883 * 1 x [1..16]
2884 * However, 8 rss queues per pool (vmdq) are sufficient for
2885 * most cases.
2886 */
2887 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 if (ixgbe->num_rx_groups == 1) {
2889 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 } else if (ixgbe->num_rx_groups <= 32) {
2891 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 min(4, ring_per_group);
2893 } else if (ixgbe->num_rx_groups <= 64) {
2894 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 min(2, ring_per_group);
2896 }
2897 break;
2898
2899 default:
2900 break;
2901 }
2902
2903 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904
2905 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3028 * 1 = force interrupt type MSI-X
3029 * 2 = force interrupt type MSI
3030 * 3 = force interrupt type Legacy
3031 */
3032 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034
3035 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 0, 1, DEFAULT_LSO_ENABLE);
3041 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 0, 1, DEFAULT_LRO_ENABLE);
3043 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047
3048 /* Head Write Back not recommended for 82599 */
3049 if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 ixgbe->tx_head_wb_enable = B_FALSE;
3051 }
3052
3053 /*
3054 * ixgbe LSO needs tx h/w checksum support.
3055 * LSO will be disabled if tx h/w checksum is not
3056 * enabled.
3057 */
3058 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 ixgbe->lso_enable = B_FALSE;
3060 }
3061
3062 /*
3063 * ixgbe LRO needs rx h/w checksum support.
3064 * LRO will be disabled if rx h/w checksum is not
3065 * enabled.
3066 */
3067 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 ixgbe->lro_enable = B_FALSE;
3069 }
3070
3071 /*
3072 * ixgbe LRO is currently supported only on the 82599.
3073 */
3074 if (hw->mac.type != ixgbe_mac_82599EB) {
3075 ixgbe->lro_enable = B_FALSE;
3076 }
3077 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 DEFAULT_TX_COPY_THRESHOLD);
3080 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089
3090 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 DEFAULT_RX_COPY_THRESHOLD);
3093 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 DEFAULT_RX_LIMIT_PER_INTR);
3096
3097 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 ixgbe->capab->min_intr_throttle,
3099 ixgbe->capab->max_intr_throttle,
3100 ixgbe->capab->def_intr_throttle);
3101 /*
3102 * The 82599 requires that the interrupt throttling rate be
3103 * a multiple of 8.  This is enforced by the register
3104 * definition.
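 *
 * For example, the 0xFF8 mask below silently rounds a configured
 * rate of 0x203 down to 0x200.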
3105 */
3106 if (hw->mac.type == ixgbe_mac_82599EB)
3107 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 }
3109
3110 static void
3111 ixgbe_init_params(ixgbe_t *ixgbe)
3112 {
3113 ixgbe->param_en_10000fdx_cap = 1;
3114 ixgbe->param_en_1000fdx_cap = 1;
3115 ixgbe->param_en_100fdx_cap = 1;
3116 ixgbe->param_adv_10000fdx_cap = 1;
3117 ixgbe->param_adv_1000fdx_cap = 1;
3118 ixgbe->param_adv_100fdx_cap = 1;
3119
3120 ixgbe->param_pause_cap = 1;
3121 ixgbe->param_asym_pause_cap = 1;
3122 ixgbe->param_rem_fault = 0;
3123
3124 ixgbe->param_adv_autoneg_cap = 1;
3125 ixgbe->param_adv_pause_cap = 1;
3126 ixgbe->param_adv_asym_pause_cap = 1;
3212 /*
3213 * ixgbe_driver_link_check - Link status processing.
3214 *
3215 * This function can be called in both kernel context and interrupt context
3216 */
3217 static void
3218 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 {
3220 struct ixgbe_hw *hw = &ixgbe->hw;
3221 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 boolean_t link_up = B_FALSE;
3223 boolean_t link_changed = B_FALSE;
3224
3225 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226
3227 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 if (link_up) {
3229 ixgbe->link_check_complete = B_TRUE;
3230
3231 /* Link is up, enable flow control settings */
3232 (void) ixgbe_fc_enable(hw, 0);
3233
3234 /*
3235 * The Link is up, check whether it was marked as down earlier
3236 */
3237 if (ixgbe->link_state != LINK_STATE_UP) {
3238 switch (speed) {
3239 case IXGBE_LINK_SPEED_10GB_FULL:
3240 ixgbe->link_speed = SPEED_10GB;
3241 break;
3242 case IXGBE_LINK_SPEED_1GB_FULL:
3243 ixgbe->link_speed = SPEED_1GB;
3244 break;
3245 case IXGBE_LINK_SPEED_100_FULL:
3246 ixgbe->link_speed = SPEED_100;
3247 }
3248 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 ixgbe->link_state = LINK_STATE_UP;
3250 link_changed = B_TRUE;
3251 }
3252 } else {
3731 eiac = 0;
3732
3733 /*
3734 * General purpose interrupt enable.
3735 * For the 82599, extended interrupt automask is enabled
3736 * only in MSI or MSI-X mode.
3737 */
3738 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 gpie |= IXGBE_GPIE_EIAME;
3741 }
3742 }
3743
3744 /* Enable specific "other" interrupt types */
3745 switch (hw->mac.type) {
3746 case ixgbe_mac_82598EB:
3747 gpie |= ixgbe->capab->other_gpie;
3748 break;
3749
3750 case ixgbe_mac_82599EB:
3751 gpie |= ixgbe->capab->other_gpie;
3752
3753 /* Enable an 8us RSC delay when LRO is enabled */
3754 if (ixgbe->lro_enable) {
3755 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 }
3757 break;
3758
3759 default:
3760 break;
3761 }
3762
3763 /* write to interrupt control registers */
3764 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 IXGBE_WRITE_FLUSH(hw);
3769 }
3770
3924 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925 &atlas);
3926 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928 atlas);
3929
3930 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931 &atlas);
3932 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934 atlas);
3935
3936 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937 &atlas);
3938 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940 atlas);
3941 break;
3942
3943 case ixgbe_mac_82599EB:
3944 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945 reg |= (IXGBE_AUTOC_FLU |
3946 IXGBE_AUTOC_10G_KX4);
3947 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948
3949 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 B_FALSE, B_TRUE);
3951 break;
3952
3953 default:
3954 break;
3955 }
3956 }
3957
3958 #pragma inline(ixgbe_intr_rx_work)
3959 /*
3960 * ixgbe_intr_rx_work - RX processing of ISR.
3961 */
3962 static void
3963 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4142 * Recycle the tx descriptors
4143 */
4144 tx_ring = &ixgbe->tx_rings[0];
4145 tx_ring->tx_recycle(tx_ring);
4146
4147 /*
4148 * Schedule the re-transmit
4149 */
4150 tx_reschedule = (tx_ring->reschedule &&
4151 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 }
4153
4154 /* any interrupt type other than tx/rx */
4155 if (eicr & ixgbe->capab->other_intr) {
4156 switch (hw->mac.type) {
4157 case ixgbe_mac_82598EB:
4158 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 break;
4160
4161 case ixgbe_mac_82599EB:
4162 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 break;
4165
4166 default:
4167 break;
4168 }
4169 ixgbe_intr_other_work(ixgbe, eicr);
4170 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 }
4172
4173 mutex_exit(&ixgbe->gen_lock);
4174
4175 result = DDI_INTR_CLAIMED;
4176 } else {
4177 mutex_exit(&ixgbe->gen_lock);
4178
4179 /*
4180 * No interrupt cause bits set: don't claim this interrupt.
4181 */
4235 if (eicr & 0x1) {
4236 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 }
4238
4239 /*
4240 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 */
4242 if (eicr & 0x2) {
4243 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 }
4245
4246 /* any interrupt type other than tx/rx */
4247 if (eicr & ixgbe->capab->other_intr) {
4248 mutex_enter(&ixgbe->gen_lock);
4249 switch (hw->mac.type) {
4250 case ixgbe_mac_82598EB:
4251 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 break;
4253
4254 case ixgbe_mac_82599EB:
4255 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 break;
4258
4259 default:
4260 break;
4261 }
4262 ixgbe_intr_other_work(ixgbe, eicr);
4263 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 mutex_exit(&ixgbe->gen_lock);
4265 }
4266
4267 /* re-enable the interrupts which were automasked */
4268 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269
4270 return (DDI_INTR_CLAIMED);
4271 }
4272
4273 /*
4274 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4314 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 DDI_FM_OK) {
4316 ddi_fm_service_impact(ixgbe->dip,
4317 DDI_SERVICE_DEGRADED);
4318 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 return (DDI_INTR_CLAIMED);
4320 }
4321
4322 /*
4323 * Check "other" cause bits: any interrupt type other than tx/rx
4324 */
4325 if (eicr & ixgbe->capab->other_intr) {
4326 mutex_enter(&ixgbe->gen_lock);
4327 switch (hw->mac.type) {
4328 case ixgbe_mac_82598EB:
4329 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 ixgbe_intr_other_work(ixgbe, eicr);
4331 break;
4332
4333 case ixgbe_mac_82599EB:
4334 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 ixgbe_intr_other_work(ixgbe, eicr);
4336 break;
4337
4338 default:
4339 break;
4340 }
4341 mutex_exit(&ixgbe->gen_lock);
4342 }
4343
4344 /* re-enable the interrupts which were automasked */
4345 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 }
4347
4348 return (DDI_INTR_CLAIMED);
4349 }
4350
4351 /*
4352 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 *
4714 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715 int8_t cause)
4716 {
4717 struct ixgbe_hw *hw = &ixgbe->hw;
4718 u32 ivar, index;
4719
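/*
 * IVAR layout, as assumed here from the register math below: on the
 * 82598 each IVAR register packs four 8-bit vector entries, indexed
 * by ((cause * 64) + intr_alloc_entry); on the 82599 each register
 * covers two entries with separate rx and tx bytes, and "other"
 * causes (cause == -1) live in IVAR_MISC.
 */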
4720 switch (hw->mac.type) {
4721 case ixgbe_mac_82598EB:
4722 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 if (cause == -1) {
4724 cause = 0;
4725 }
4726 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 break;
4732
4733 case ixgbe_mac_82599EB:
4734 if (cause == -1) {
4735 /* other causes */
4736 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 index = (intr_alloc_entry & 1) * 8;
4738 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 ivar &= ~(0xFF << index);
4740 ivar |= (msix_vector << index);
4741 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 } else {
4743 /* tx or rx causes */
4744 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 ivar = IXGBE_READ_REG(hw,
4747 IXGBE_IVAR(intr_alloc_entry >> 1));
4748 ivar &= ~(0xFF << index);
4749 ivar |= (msix_vector << index);
4750 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 ivar);
4752 }
4753 break;
4767 */
4768 static void
4769 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 {
4771 struct ixgbe_hw *hw = &ixgbe->hw;
4772 u32 ivar, index;
4773
4774 switch (hw->mac.type) {
4775 case ixgbe_mac_82598EB:
4776 if (cause == -1) {
4777 cause = 0;
4778 }
4779 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 (intr_alloc_entry & 0x3)));
4783 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 break;
4785
4786 case ixgbe_mac_82599EB:
4787 if (cause == -1) {
4788 /* other causes */
4789 index = (intr_alloc_entry & 1) * 8;
4790 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 } else {
4794 /* tx or rx causes */
4795 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 ivar = IXGBE_READ_REG(hw,
4797 IXGBE_IVAR(intr_alloc_entry >> 1));
4798 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 ivar);
4801 }
4802 break;
4803
4804 default:
4805 break;
4806 }
4816 */
4817 static void
4818 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 {
4820 struct ixgbe_hw *hw = &ixgbe->hw;
4821 u32 ivar, index;
4822
4823 switch (hw->mac.type) {
4824 case ixgbe_mac_82598EB:
4825 if (cause == -1) {
4826 cause = 0;
4827 }
4828 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4831 (intr_alloc_entry & 0x3)));
4832 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 break;
4834
4835 case ixgbe_mac_82599EB:
4836 if (cause == -1) {
4837 /* other causes */
4838 index = (intr_alloc_entry & 1) * 8;
4839 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 } else {
4843 /* tx or rx causes */
4844 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 ivar = IXGBE_READ_REG(hw,
4846 IXGBE_IVAR(intr_alloc_entry >> 1));
4847 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 ivar);
4850 }
4851 break;
4852
4853 default:
4854 break;
4855 }
4858 /*
4859 * Convert the driver-maintained rx ring index to the rx ring
4860 * index in h/w.
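 *
 * Worked example (illustrative): in VMDQ+RSS mode on the 82599 with
 * 32 or fewer rx groups, each group owns a 4-queue slice of the
 * hardware rings, so with 2 rings per group, sw index 5 maps to hw
 * index (5 / 2) * 4 + (5 % 2) = 9.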
4861 */
4862 static uint32_t
4863 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 {
4865
4866 struct ixgbe_hw *hw = &ixgbe->hw;
4867 uint32_t rx_ring_per_group, hw_rx_index;
4868
4869 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 return (sw_rx_index);
4872 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 switch (hw->mac.type) {
4874 case ixgbe_mac_82598EB:
4875 return (sw_rx_index);
4876
4877 case ixgbe_mac_82599EB:
4878 return (sw_rx_index * 2);
4879
4880 default:
4881 break;
4882 }
4883 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885
4886 switch (hw->mac.type) {
4887 case ixgbe_mac_82598EB:
4888 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 16 + (sw_rx_index % rx_ring_per_group);
4890 return (hw_rx_index);
4891
4892 case ixgbe_mac_82599EB:
4893 if (ixgbe->num_rx_groups > 32) {
4894 hw_rx_index = (sw_rx_index /
4895 rx_ring_per_group) * 2 +
4896 (sw_rx_index % rx_ring_per_group);
4897 } else {
4898 hw_rx_index = (sw_rx_index /
4899 rx_ring_per_group) * 4 +
4900 (sw_rx_index % rx_ring_per_group);
4901 }
4902 return (hw_rx_index);
4903
4904 default:
4905 break;
4906 }
4907 }
4908
4909 /*
4910 * Should never be reached.  Just to keep the compiler happy.
4911 */
4912 return (sw_rx_index);
4977 */
4978 static void
4979 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 {
4981 struct ixgbe_hw *hw = &ixgbe->hw;
4982 ixgbe_intr_vector_t *vect; /* vector bitmap */
4983 int r_idx; /* ring index */
4984 int v_idx; /* vector index */
4985 uint32_t hw_index;
4986
4987 /*
4988 * Clear any previous entries
4989 */
4990 switch (hw->mac.type) {
4991 case ixgbe_mac_82598EB:
4992 for (v_idx = 0; v_idx < 25; v_idx++)
4993 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 break;
4995
4996 case ixgbe_mac_82599EB:
4997 for (v_idx = 0; v_idx < 64; v_idx++)
4998 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 break;
5001
5002 default:
5003 break;
5004 }
5005
5006 /*
5007 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008 * tx rings[0] will use RTxQ[1].
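 * (The ixgbe_setup_ivar() arguments below are (entry, vector,
 * cause): entry 0's rx cause 0 is steered to vector 0 and its tx
 * cause 1 to vector 1.)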
5009 */
5010 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 return;
5014 }
5015
5016 /*
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #include "ixgbe_sw.h"
33
34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
35 static char ixgbe_version[] = "ixgbe 1.1.7";
36
37 /*
38 * Local function prototypes
39 */
40 static int ixgbe_register_mac(ixgbe_t *);
41 static int ixgbe_identify_hardware(ixgbe_t *);
42 static int ixgbe_regs_map(ixgbe_t *);
43 static void ixgbe_init_properties(ixgbe_t *);
44 static int ixgbe_init_driver_settings(ixgbe_t *);
45 static void ixgbe_init_locks(ixgbe_t *);
46 static void ixgbe_destroy_locks(ixgbe_t *);
47 static int ixgbe_init(ixgbe_t *);
48 static int ixgbe_chip_start(ixgbe_t *);
49 static void ixgbe_chip_stop(ixgbe_t *);
276 0xFF8, /* maximum interrupt throttle rate */
277 0, /* minimum interrupt throttle rate */
278 200, /* default interrupt throttle rate */
279 64, /* maximum total msix vectors */
280 16, /* maximum number of ring vectors */
281 2, /* maximum number of other vectors */
282 (IXGBE_EICR_LSC
283 | IXGBE_EICR_GPI_SDP1
284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
285
286 (IXGBE_SDP1_GPIEN
287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
288
289 (IXGBE_FLAG_DCA_CAPABLE
290 | IXGBE_FLAG_RSS_CAPABLE
291 | IXGBE_FLAG_VMDQ_CAPABLE
292 | IXGBE_FLAG_RSC_CAPABLE
293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
294 };
295
296 static adapter_info_t ixgbe_X540_cap = {
297 128, /* maximum number of rx queues */
298 1, /* minimum number of rx queues */
299 128, /* default number of rx queues */
300 64, /* maximum number of rx groups */
301 1, /* minimum number of rx groups */
302 1, /* default number of rx groups */
303 128, /* maximum number of tx queues */
304 1, /* minimum number of tx queues */
305 8, /* default number of tx queues */
306 15500, /* maximum MTU size */
307 0xFF8, /* maximum interrupt throttle rate */
308 0, /* minimum interrupt throttle rate */
309 200, /* default interrupt throttle rate */
310 64, /* maximum total msix vectors */
311 16, /* maximum number of ring vectors */
312 2, /* maximum number of other vectors */
313 /* XXX KEBE ASKS, Do we care about X540's SDP3? */
314 (IXGBE_EICR_LSC
315 | IXGBE_EICR_GPI_SDP0
316 | IXGBE_EICR_GPI_SDP1
317 | IXGBE_EICR_GPI_SDP2
318 /* | IXGBE_EICR_GPI_SDP3 */), /* "other" interrupt types handled */
319
320 (IXGBE_SDP1_GPIEN
321 | IXGBE_SDP2_GPIEN
322 /* | IXGBE_SDP3_GPIEN */), /* "other" interrupt types enable mask */
323
324 /* XXX KEBE ASKS, SFP_PLUG capable?!? */
325 (IXGBE_FLAG_DCA_CAPABLE
326 | IXGBE_FLAG_RSS_CAPABLE
327 | IXGBE_FLAG_VMDQ_CAPABLE
328 | IXGBE_FLAG_RSC_CAPABLE
329 /* | IXGBE_FLAG_SFP_PLUG_CAPABLE */) /* capability flags */
330 };
331
332 /*
333 * Module Initialization Functions.
334 */
335
336 int
337 _init(void)
338 {
339 int status;
340
341 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
342
343 status = mod_install(&ixgbe_modlinkage);
344
345 if (status != DDI_SUCCESS) {
346 mac_fini_ops(&ixgbe_dev_ops);
347 }
348
349 return (status);
350 }
351
888 ixgbe->capab = &ixgbe_82598eb_cap;
889
890 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
891 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
892 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
893 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
894 }
895 break;
896
897 case ixgbe_mac_82599EB:
898 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
899 ixgbe->capab = &ixgbe_82599eb_cap;
900
901 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
902 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
903 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
904 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
905 }
906 break;
907
908 case ixgbe_mac_X540:
909 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
910 ixgbe->capab = &ixgbe_X540_cap;
911 /*
912 * For now, X540 is all set in its capab structure.
913 * As other X540 variants show up, things can change here.
914 */
915 break;
916
917 default:
918 IXGBE_DEBUGLOG_1(ixgbe,
919 "adapter not supported in ixgbe_identify_hardware(): %d\n",
920 hw->mac.type);
921 return (IXGBE_FAILURE);
922 }
923
924 return (IXGBE_SUCCESS);
925 }
926
927 /*
928 * ixgbe_regs_map - Map the device registers.
930 */
931 static int
932 ixgbe_regs_map(ixgbe_t *ixgbe)
933 {
934 dev_info_t *devinfo = ixgbe->dip;
935 struct ixgbe_hw *hw = &ixgbe->hw;
936 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1230 */
1231 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1232 /*
1233 * Some PCI-E parts fail the first check due to
1234 * the link being in sleep state.  Call it again;
1235 * if it fails a second time, it's a real issue.
1236 */
1237 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1238 ixgbe_error(ixgbe,
1239 "Invalid NVM checksum. Please contact "
1240 "the vendor to update the NVM.");
1241 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1242 goto init_fail;
1243 }
1244 }
1245
1246 /*
1247 * Set up the default flow control thresholds.  Whether flow
1248 * control is enabled, and its type, are controlled by ixgbe.conf.
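 *
 * Background (general 8259x behavior, noted here for context):
 * FCRTH and FCRTL are rx buffer occupancy thresholds in bytes; an
 * XOFF pause is sent when occupancy rises past high_water and,
 * with send_xon set, an XON when it drops back below low_water.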
1249 */
1250 hw->fc.high_water[0] = DEFAULT_FCRTH;
1251 hw->fc.low_water[0] = DEFAULT_FCRTL;
1252 hw->fc.pause_time = DEFAULT_FCPAUSE;
1253 hw->fc.send_xon = B_TRUE;
1254
1255 /*
1256 * Initialize link settings
1257 */
1258 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1259
1260 /*
1261 * Initialize the chipset hardware
1262 */
1263 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1264 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1265 goto init_fail;
1266 }
1267
1268 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1269 goto init_fail;
1270 }
1271
2126 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2127
2128 rx_data->rbd_next = 0;
2129 rx_data->lro_first = 0;
2130
2131 /*
2132 * Setup the Receive Descriptor Control Register (RXDCTL)
2133 * PTHRESH=32 descriptors (half the internal cache)
2134 * HTHRESH=0 descriptors (to minimize latency on fetch)
2135 * WTHRESH defaults to 1 (writeback each descriptor)
2136 */
2137 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2138 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2139
2140 /* Not a valid value for 82599 */
2141 if (hw->mac.type < ixgbe_mac_82599EB) {
2142 reg_val |= 0x0020; /* pthresh */
2143 }
2144 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2145
2146 if (hw->mac.type >= ixgbe_mac_82599EB) {
2147 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2148 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2149 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2150 }
2151
2152 /*
2153 * Setup the Split and Replication Receive Control Register.
2154 * Set the rx buffer size and the advanced descriptor type.
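 * Note: SRRCTL.BSIZEPKT is expressed in 1 KB units (the shared
 * code's IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so a 2 KB rx buffer
 * is programmed as the value 2.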
2155 */
2156 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2157 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2158 reg_val |= IXGBE_SRRCTL_DROP_EN;
2159 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2160 }
2161
2162 static void
2163 ixgbe_setup_rx(ixgbe_t *ixgbe)
2164 {
2165 ixgbe_rx_ring_t *rx_ring;
2166 struct ixgbe_hw *hw = &ixgbe->hw;
2363 */
2364 tx_ring->tbd_head_wb = (uint32_t *)
2365 ((uintptr_t)tx_ring->tbd_area.address + size);
2366 *tx_ring->tbd_head_wb = 0;
2367
2368 buf_low = (uint32_t)
2369 (tx_ring->tbd_area.dma_address + size);
2370 buf_high = (uint32_t)
2371 ((tx_ring->tbd_area.dma_address + size) >> 32);
2372
2373 /* Set the head write-back enable bit */
2374 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2375
2376 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2377 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2378
2379 /*
2380 * Turn off relaxed ordering for head write-back, or it will
2381 * cause problems with tx recycling.
2382 */
2383 #if 1
2384 /* XXX KEBE ASKS --> Should we do what FreeBSD does? */
2385 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2386 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2387 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2388 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2389 if (hw->mac.type == ixgbe_mac_82598EB) {
2390 IXGBE_WRITE_REG(hw,
2391 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2392 } else {
2393 IXGBE_WRITE_REG(hw,
2394 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2395 }
2396 #else
2397 /* XXX KEBE ASKS --> Or should we do what we've always done? */
2398 reg_val = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index));
2399 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2400 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2401 #endif
2402 } else {
2403 tx_ring->tbd_head_wb = NULL;
2404 #if 1
2405 /*
2406 * XXX KEBE ASKS --> Should we do what FreeBSD does and
2407 * twiddle TXCTRL_DESC_WR0_EN off anyway?
2408 */
2409 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2410 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2411 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2412 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2413 if (hw->mac.type == ixgbe_mac_82598EB) {
2414 IXGBE_WRITE_REG(hw,
2415 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2416 } else {
2417 IXGBE_WRITE_REG(hw,
2418 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2419 }
2420 #endif
2421 }
2422
2423 tx_ring->tbd_head = 0;
2424 tx_ring->tbd_tail = 0;
2425 tx_ring->tbd_free = tx_ring->ring_size;
2426
2427 if (ixgbe->tx_ring_init == B_TRUE) {
2428 tx_ring->tcb_head = 0;
2429 tx_ring->tcb_tail = 0;
2430 tx_ring->tcb_free = tx_ring->free_list_size;
2431 }
2432
2433 /*
2434 * Initialize the s/w context structure
2435 */
2436 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2437 }
2438
2439 static void
2440 ixgbe_setup_tx(ixgbe_t *ixgbe)
2441 {
2447
2448 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2449 tx_ring = &ixgbe->tx_rings[i];
2450 ixgbe_setup_tx_ring(tx_ring);
2451 }
2452
2453 /*
2454 * Setup the per-ring statistics mapping.
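 *
 * Layout sketch (as read from the packing below): each TQSMR/TQSM
 * register holds four 8-bit queue-to-statistics-counter fields, so
 * tx queues 0..3 map through register 0, queues 4..7 through
 * register 1, and so on.  For example, i = 5 places (5 & 0xF) in
 * byte (5 & 0x3) = 1 of TQSM(5 >> 2) = TQSM(1).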
2455 */
2456 ring_mapping = 0;
2457 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2458 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2459 if ((i & 0x3) == 0x3) {
2460 switch (hw->mac.type) {
2461 case ixgbe_mac_82598EB:
2462 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2463 ring_mapping);
2464 break;
2465
2466 case ixgbe_mac_82599EB:
2467 case ixgbe_mac_X540:
2468 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2469 ring_mapping);
2470 break;
2471
2472 default:
2473 break;
2474 }
2475
2476 ring_mapping = 0;
2477 }
2478 }
2479 if (i & 0x3) {
2480 switch (hw->mac.type) {
2481 case ixgbe_mac_82598EB:
2482 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2483 break;
2484
2485 case ixgbe_mac_82599EB:
2486 case ixgbe_mac_X540:
2487 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2488 break;
2489
2490 default:
2491 break;
2492 }
2493 }
2494
2495 /*
2496 * Enable CRC appending and TX padding (for short tx frames)
2497 */
2498 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2499 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2500 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2501
2502 /*
2503 * enable DMA for 82599 and X540 parts
2504 */
2505 if (hw->mac.type >= ixgbe_mac_82599EB) {
2506 /* DMATXCTL.TE must be set after all Tx config is complete */
2507 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2508 reg_val |= IXGBE_DMATXCTL_TE;
2509 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2510 #if 0
2511 /* XXX KEBE SAYS - FreeBSD sets up MTQC. Should we? */
2512 /* Disable arbiter to set MTQC */
2513 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2514 reg_val |= IXGBE_RTTDCS_ARBDIS;
2515 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2516 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2517 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2518 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2519 #endif
2520 }
2521
2522 /*
2523 * Enable the tx queues.
2524 * For 82599, this must be done after DMATXCTL.TE is set.
2525 */
2526 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2527 tx_ring = &ixgbe->tx_rings[i];
2528 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2529 reg_val |= IXGBE_TXDCTL_ENABLE;
2530 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2531 }
2532 }
2533
2534 /*
2535 * ixgbe_setup_rss - Setup receive-side scaling feature.
2536 */
2537 static void
2538 ixgbe_setup_rss(ixgbe_t *ixgbe)
2539 {
2599 {
2600 struct ixgbe_hw *hw = &ixgbe->hw;
2601 uint32_t vmdctl, i, vtctl;
2602
2603 /*
2604 * Setup the VMDq Control register, enable VMDq based on
2605 * packet destination MAC address:
2606 */
2607 switch (hw->mac.type) {
2608 case ixgbe_mac_82598EB:
2609 /*
2610 * VMDq Enable = 1;
2611 * VMDq Filter = 0; MAC filtering
2612 * Default VMDq output index = 0;
2613 */
2614 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2615 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2616 break;
2617
2618 case ixgbe_mac_82599EB:
2619 case ixgbe_mac_X540:
2620 /*
2621 * Enable VMDq-only.
2622 */
2623 vmdctl = IXGBE_MRQC_VMDQEN;
2624 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2625
2626 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2627 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2628 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2629 }
2630
2631 /*
2632 * Enable Virtualization and Replication.
2633 */
2634 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2635 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2636
2637 /*
2638 * Enable receiving packets to all VFs
2639 */
2693 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2694 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2695 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2696 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2697 IXGBE_MRQC_RSS_FIELD_IPV6 |
2698 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2699 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2700 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2701 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2702
2703 /*
2704 * Enable and Setup VMDq
2705 * VMDq Filter = 0; MAC filtering
2706 * Default VMDq output index = 0;
2707 */
2708 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2709 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2710 break;
2711
2712 case ixgbe_mac_82599EB:
2713 case ixgbe_mac_X540:
2714 /*
2715 * Enable RSS & Setup RSS Hash functions
2716 */
2717 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2718 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2719 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2720 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2721 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2722 IXGBE_MRQC_RSS_FIELD_IPV6 |
2723 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2724 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2725 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2726
2727 /*
2728 * Enable VMDq+RSS.
2729 */
2730 if (ixgbe->num_rx_groups > 32) {
2731 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2732 } else {
2733 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2739 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2740 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2741 }
2742 break;
2743
2744 default:
2745 break;
2746
2747 }
2748
2749 /*
2750 * Disable Packet Checksum to enable RSS for multiple receive queues.
2751 * It is an adapter hardware limitation that Packet Checksum is
2752 * mutually exclusive with RSS.
2753 */
2754 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2755 rxcsum |= IXGBE_RXCSUM_PCSD;
2756 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2757 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2758
2759 if (hw->mac.type >= ixgbe_mac_82599EB) {
2760 /*
2761 * Enable Virtualization and Replication.
2762 */
2763 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2764 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2765
2766 /*
2767 * Enable receiving packets to all VFs
2768 */
2769 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2770 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2771 }
2772 }
2773
2774 /*
2775 * ixgbe_init_unicst - Initialize the unicast addresses.
2776 */
2777 static void
2778 ixgbe_init_unicst(ixgbe_t *ixgbe)
2779 {
2914 * and save them in the hardware registers.
2915 */
2916 static void
2917 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2918 {
2919 uint8_t *mc_addr_list;
2920 uint32_t mc_addr_count;
2921 struct ixgbe_hw *hw = &ixgbe->hw;
2922
2923 ASSERT(mutex_owned(&ixgbe->gen_lock));
2924
2925 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2926
2927 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2928 mc_addr_count = ixgbe->mcast_count;
2929
2930 /*
2931 * Update the multicast addresses to the MTA registers
2932 */
2933 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2934 ixgbe_mc_table_itr, TRUE);
2935 }
2936
2937 /*
2938 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2939 *
2940 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2941 * Different chipsets may have different allowed configurations of vmdq and rss.
2942 */
2943 static void
2944 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2945 {
2946 struct ixgbe_hw *hw = &ixgbe->hw;
2947 uint32_t ring_per_group;
2948
2949 switch (hw->mac.type) {
2950 case ixgbe_mac_82598EB:
2951 /*
2952 * 82598 supports the following combination:
2953 * vmdq no. x rss no.
2954 * [5..16] x 1
2955 * [1..4] x [1..16]
2956 * However, 8 rss queues per pool (vmdq) are sufficient for
2957 * most cases.
2958 */
2959 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2960 if (ixgbe->num_rx_groups > 4) {
2961 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2962 } else {
2963 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2964 min(8, ring_per_group);
2965 }
2966
2967 break;
2968
2969 case ixgbe_mac_82599EB:
2970 case ixgbe_mac_X540:
2971 /*
2972 * 82599 supports the following combination:
2973 * vmdq no. x rss no.
2974 * [33..64] x [1..2]
2975 * [2..32] x [1..4]
2976 * 1 x [1..16]
2977 * However, 8 rss queues per pool (vmdq) are sufficient for
2978 * most cases.
2979 *
2980 * For now, treat X540 like the 82599.
2981 */
2982 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2983 if (ixgbe->num_rx_groups == 1) {
2984 ixgbe->num_rx_rings = min(8, ring_per_group);
2985 } else if (ixgbe->num_rx_groups <= 32) {
2986 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2987 min(4, ring_per_group);
2988 } else if (ixgbe->num_rx_groups <= 64) {
2989 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2990 min(2, ring_per_group);
2991 }
2992 break;
2993
2994 default:
2995 break;
2996 }
2997
2998 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2999
3000 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3123 * 1 = force interrupt type MSI-X
3124 * 2 = force interrupt type MSI
3125 * 3 = force interrupt type Legacy
3126 */
3127 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3128 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3129
3130 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3131 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3132 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3133 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3134 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3135 0, 1, DEFAULT_LSO_ENABLE);
3136 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3137 0, 1, DEFAULT_LRO_ENABLE);
3138 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3139 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3140 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3141 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3142
3143 /* Head Write Back not recommended for 82599 and X540 */
3144 if (hw->mac.type >= ixgbe_mac_82599EB) {
3145 ixgbe->tx_head_wb_enable = B_FALSE;
3146 }
3147
3148 /*
3149 * ixgbe LSO needs tx h/w checksum support.
3150 * LSO will be disabled if tx h/w checksum is not
3151 * enabled.
3152 */
3153 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3154 ixgbe->lso_enable = B_FALSE;
3155 }
3156
3157 /*
3158 * ixgbe LRO needs rx h/w checksum support.
3159 * LRO will be disabled if rx h/w checksum is not
3160 * enabled.
3161 */
3162 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3163 ixgbe->lro_enable = B_FALSE;
3164 }
3165
3166 /*
3167 * ixgbe LRO is currently supported only on the 82599 and X540.
3168 */
3169 if (hw->mac.type < ixgbe_mac_82599EB) {
3170 ixgbe->lro_enable = B_FALSE;
3171 }
3172 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3173 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3174 DEFAULT_TX_COPY_THRESHOLD);
3175 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3176 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3177 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3178 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3179 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3180 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3181 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3182 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3183 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3184
3185 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3186 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3187 DEFAULT_RX_COPY_THRESHOLD);
3188 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3189 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3190 DEFAULT_RX_LIMIT_PER_INTR);
3191
3192 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3193 ixgbe->capab->min_intr_throttle,
3194 ixgbe->capab->max_intr_throttle,
3195 ixgbe->capab->def_intr_throttle);
3196 /*
3197 * The 82599 and X540 require that the interrupt throttling
3198 * rate be a multiple of 8.  This is enforced by the register
3199 * definition.
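 *
 * For example, the 0xFF8 mask below silently rounds a configured
 * rate of 0x203 down to 0x200.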
3200 */
3201 if (hw->mac.type >= ixgbe_mac_82599EB)
3202 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3203 }
3204
3205 static void
3206 ixgbe_init_params(ixgbe_t *ixgbe)
3207 {
3208 ixgbe->param_en_10000fdx_cap = 1;
3209 ixgbe->param_en_1000fdx_cap = 1;
3210 ixgbe->param_en_100fdx_cap = 1;
3211 ixgbe->param_adv_10000fdx_cap = 1;
3212 ixgbe->param_adv_1000fdx_cap = 1;
3213 ixgbe->param_adv_100fdx_cap = 1;
3214
3215 ixgbe->param_pause_cap = 1;
3216 ixgbe->param_asym_pause_cap = 1;
3217 ixgbe->param_rem_fault = 0;
3218
3219 ixgbe->param_adv_autoneg_cap = 1;
3220 ixgbe->param_adv_pause_cap = 1;
3221 ixgbe->param_adv_asym_pause_cap = 1;
3307 /*
3308 * ixgbe_driver_link_check - Link status processing.
3309 *
3310 * This function can be called in both kernel context and interrupt context
3311 */
3312 static void
3313 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3314 {
3315 struct ixgbe_hw *hw = &ixgbe->hw;
3316 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3317 boolean_t link_up = B_FALSE;
3318 boolean_t link_changed = B_FALSE;
3319
3320 ASSERT(mutex_owned(&ixgbe->gen_lock));
3321
3322 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3323 if (link_up) {
3324 ixgbe->link_check_complete = B_TRUE;
3325
3326 /* Link is up, enable flow control settings */
3327 (void) ixgbe_fc_enable(hw);
3328
3329 /*
3330 * The Link is up, check whether it was marked as down earlier
3331 */
3332 if (ixgbe->link_state != LINK_STATE_UP) {
3333 switch (speed) {
3334 case IXGBE_LINK_SPEED_10GB_FULL:
3335 ixgbe->link_speed = SPEED_10GB;
3336 break;
3337 case IXGBE_LINK_SPEED_1GB_FULL:
3338 ixgbe->link_speed = SPEED_1GB;
3339 break;
3340 case IXGBE_LINK_SPEED_100_FULL:
3341 ixgbe->link_speed = SPEED_100;
3342 }
3343 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3344 ixgbe->link_state = LINK_STATE_UP;
3345 link_changed = B_TRUE;
3346 }
3347 } else {
3826 eiac = 0;
3827
3828 /*
3829 * General purpose interrupt enable.
3830 * For the 82599, extended interrupt automask is enabled
3831 * only in MSI or MSI-X mode.
3832 */
3833 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3834 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3835 gpie |= IXGBE_GPIE_EIAME;
3836 }
3837 }
3838
3839 /* Enable specific "other" interrupt types */
3840 switch (hw->mac.type) {
3841 case ixgbe_mac_82598EB:
3842 gpie |= ixgbe->capab->other_gpie;
3843 break;
3844
3845 case ixgbe_mac_82599EB:
3846 case ixgbe_mac_X540:
3847 gpie |= ixgbe->capab->other_gpie;
3848
3849 /* Enable an 8us RSC delay when LRO is enabled */
3850 if (ixgbe->lro_enable) {
3851 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3852 }
3853 break;
3854
3855 default:
3856 break;
3857 }
3858
3859 /* write to interrupt control registers */
3860 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3861 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3862 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3863 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3864 IXGBE_WRITE_FLUSH(hw);
3865 }
3866
4020 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4021 &atlas);
4022 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4023 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4024 atlas);
4025
4026 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4027 &atlas);
4028 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4029 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4030 atlas);
4031
4032 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4033 &atlas);
4034 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4035 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4036 atlas);
4037 break;
4038
4039 case ixgbe_mac_82599EB:
4040 case ixgbe_mac_X540:
4041 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4042 reg |= (IXGBE_AUTOC_FLU |
4043 IXGBE_AUTOC_10G_KX4);
4044 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4045
4046 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4047 B_FALSE, B_TRUE);
4048 break;
4049
4050 default:
4051 break;
4052 }
4053 }
4054
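/*
 * A minimal refactoring sketch, not present in the driver: the three Atlas
 * analog-register updates in the 82598 case above repeat one
 * read-modify-write pattern.  A hypothetical helper like this would express
 * it once, using the same shared-code accessors the case already calls.
 */
static void
ixgbe_atlas_set_bits(ixgbe_t *ixgbe, u32 reg, u8 bits)
{
	u8 val;

	/* read the analog register, set the requested bits, write it back */
	(void) ixgbe_read_analog_reg8(&ixgbe->hw, reg, &val);
	val |= bits;
	(void) ixgbe_write_analog_reg8(&ixgbe->hw, reg, val);
}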
4055 #pragma inline(ixgbe_intr_rx_work)
4056 /*
4057  * ixgbe_intr_rx_work - RX processing portion of the ISR.
4058 */
4059 static void
4060 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4239 * Recycle the tx descriptors
4240 */
4241 tx_ring = &ixgbe->tx_rings[0];
4242 tx_ring->tx_recycle(tx_ring);
4243
4244 		/*
4245 		 * Decide whether a transmit reschedule is needed
4246 		 */
4247 tx_reschedule = (tx_ring->reschedule &&
4248 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4249 }
4250
4251 /* any interrupt type other than tx/rx */
4252 if (eicr & ixgbe->capab->other_intr) {
4253 switch (hw->mac.type) {
4254 case ixgbe_mac_82598EB:
4255 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4256 break;
4257
4258 case ixgbe_mac_82599EB:
4259 case ixgbe_mac_X540:
4260 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4261 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4262 break;
4263
4264 default:
4265 break;
4266 }
4267 ixgbe_intr_other_work(ixgbe, eicr);
4268 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4269 }
4270
4271 mutex_exit(&ixgbe->gen_lock);
4272
4273 result = DDI_INTR_CLAIMED;
4274 } else {
4275 mutex_exit(&ixgbe->gen_lock);
4276
4277 /*
4278 * No interrupt cause bits set: don't claim this interrupt.
4279 */
	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
4333 	if (eicr & 0x1) {
4334 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4335 }
4336
4337 /*
4338 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4339 */
4340 if (eicr & 0x2) {
4341 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4342 }
4343
4344 /* any interrupt type other than tx/rx */
4345 if (eicr & ixgbe->capab->other_intr) {
4346 mutex_enter(&ixgbe->gen_lock);
4347 switch (hw->mac.type) {
4348 case ixgbe_mac_82598EB:
4349 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4350 break;
4351
4352 case ixgbe_mac_82599EB:
4353 case ixgbe_mac_X540:
4354 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4355 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4356 break;
4357
4358 default:
4359 break;
4360 }
4361 ixgbe_intr_other_work(ixgbe, eicr);
4362 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4363 mutex_exit(&ixgbe->gen_lock);
4364 }
4365
4366 /* re-enable the interrupts which were automasked */
4367 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4368
4369 return (DDI_INTR_CLAIMED);
4370 }
4371
4372 /*
4373 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4413 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4414 DDI_FM_OK) {
4415 ddi_fm_service_impact(ixgbe->dip,
4416 DDI_SERVICE_DEGRADED);
4417 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4418 return (DDI_INTR_CLAIMED);
4419 }
4420
4421 /*
4422 * Check "other" cause bits: any interrupt type other than tx/rx
4423 */
4424 if (eicr & ixgbe->capab->other_intr) {
4425 mutex_enter(&ixgbe->gen_lock);
4426 switch (hw->mac.type) {
4427 case ixgbe_mac_82598EB:
4428 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4429 ixgbe_intr_other_work(ixgbe, eicr);
4430 break;
4431
4432 case ixgbe_mac_82599EB:
4433 case ixgbe_mac_X540:
4434 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4435 ixgbe_intr_other_work(ixgbe, eicr);
4436 break;
4437
4438 default:
4439 break;
4440 }
4441 mutex_exit(&ixgbe->gen_lock);
4442 }
4443
4444 /* re-enable the interrupts which were automasked */
4445 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4446 }
4447
4448 return (DDI_INTR_CLAIMED);
4449 }
4450
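/*
 * Hedged summary of the "other" interrupt handling visible in the handlers
 * above: 82598 masks by clearing the offending cause bits out of the
 * software EIMS copy, while 82599/X540 either write IXGBE_82599_OTHER_INTR
 * to EIMC (legacy and MSI paths) or keep the queue causes set via
 * IXGBE_EICR_RTX_QUEUE (MSI-X path) before calling
 * ixgbe_intr_other_work().  Below is a sketch of the 82599-flavor
 * mask/service/restore sequence as a hypothetical helper, not a function
 * in this driver.
 */
static void
ixgbe_other_intr_service(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* mask the non-queue causes in hardware while servicing them */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_82599_OTHER_INTR);
	ixgbe_intr_other_work(ixgbe, eicr);

	/* re-enable whatever the software mask still allows */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
}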
4451 /*
4452 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4453 *
4814 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4815 int8_t cause)
4816 {
4817 struct ixgbe_hw *hw = &ixgbe->hw;
4818 u32 ivar, index;
4819
4820 switch (hw->mac.type) {
4821 case ixgbe_mac_82598EB:
4822 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4823 if (cause == -1) {
4824 cause = 0;
4825 }
4826 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4827 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4828 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4829 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4830 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4831 break;
4832
4833 case ixgbe_mac_82599EB:
4834 case ixgbe_mac_X540:
4835 if (cause == -1) {
4836 /* other causes */
4837 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4838 index = (intr_alloc_entry & 1) * 8;
4839 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 ivar &= ~(0xFF << index);
4841 ivar |= (msix_vector << index);
4842 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4843 } else {
4844 /* tx or rx causes */
4845 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4846 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4847 ivar = IXGBE_READ_REG(hw,
4848 IXGBE_IVAR(intr_alloc_entry >> 1));
4849 ivar &= ~(0xFF << index);
4850 ivar |= (msix_vector << index);
4851 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4852 ivar);
4853 }
4854 break;
4868 */
4869 static void
4870 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4871 {
4872 struct ixgbe_hw *hw = &ixgbe->hw;
4873 u32 ivar, index;
4874
4875 switch (hw->mac.type) {
4876 case ixgbe_mac_82598EB:
4877 if (cause == -1) {
4878 cause = 0;
4879 }
4880 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4881 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4882 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4883 (intr_alloc_entry & 0x3)));
4884 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4885 break;
4886
4887 case ixgbe_mac_82599EB:
4888 case ixgbe_mac_X540:
4889 if (cause == -1) {
4890 /* other causes */
4891 index = (intr_alloc_entry & 1) * 8;
4892 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4893 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4894 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4895 } else {
4896 /* tx or rx causes */
4897 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4898 ivar = IXGBE_READ_REG(hw,
4899 IXGBE_IVAR(intr_alloc_entry >> 1));
4900 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4901 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4902 ivar);
4903 }
4904 break;
4905
4906 default:
4907 break;
4908 }
4918 */
4919 static void
4920 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4921 {
4922 struct ixgbe_hw *hw = &ixgbe->hw;
4923 u32 ivar, index;
4924
4925 switch (hw->mac.type) {
4926 case ixgbe_mac_82598EB:
4927 if (cause == -1) {
4928 cause = 0;
4929 }
4930 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4931 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4932 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4933 		    (intr_alloc_entry & 0x3)));
4934 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4935 break;
4936
4937 case ixgbe_mac_82599EB:
4938 case ixgbe_mac_X540:
4939 if (cause == -1) {
4940 /* other causes */
4941 index = (intr_alloc_entry & 1) * 8;
4942 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4943 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4944 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4945 } else {
4946 /* tx or rx causes */
4947 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4948 ivar = IXGBE_READ_REG(hw,
4949 IXGBE_IVAR(intr_alloc_entry >> 1));
4950 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4951 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4952 ivar);
4953 }
4954 break;
4955
4956 default:
4957 break;
4958 }
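/*
 * Illustrative worked example of the IVAR addressing shared by
 * ixgbe_setup_ivar(), ixgbe_enable_ivar() and ixgbe_disable_ivar() above.
 * On 82598, entry 5 with the rx cause (cause = 0) selects
 * IVAR(((0 * 64 + 5) >> 2) & 0x1F) = IVAR(1), byte lane 5 & 0x3 = 1
 * (bits 15:8); the tx cause (cause = 1) for the same entry selects
 * IVAR(((64 + 5) >> 2) & 0x1F) = IVAR(17), byte lane 1.  On 82599/X540,
 * entry 5 uses IVAR(5 >> 1) = IVAR(2) for both causes, at bit offset
 * 16 * (5 & 1) + 8 * cause: bits 23:16 for rx and bits 31:24 for tx.
 */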
4961 /*
4962  * Convert the rx ring index maintained by the driver to the
4963  * corresponding rx ring index in hardware.
4964  */
4965 static uint32_t
4966 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4967 {
4969 struct ixgbe_hw *hw = &ixgbe->hw;
4970 uint32_t rx_ring_per_group, hw_rx_index;
4971
4972 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4973 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4974 return (sw_rx_index);
4975 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4976 switch (hw->mac.type) {
4977 case ixgbe_mac_82598EB:
4978 return (sw_rx_index);
4979
4980 case ixgbe_mac_82599EB:
4981 case ixgbe_mac_X540:
4982 return (sw_rx_index * 2);
4983
4984 default:
4985 break;
4986 }
4987 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4988 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4989
4990 switch (hw->mac.type) {
4991 case ixgbe_mac_82598EB:
4992 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4993 16 + (sw_rx_index % rx_ring_per_group);
4994 return (hw_rx_index);
4995
4996 case ixgbe_mac_82599EB:
4997 case ixgbe_mac_X540:
4998 if (ixgbe->num_rx_groups > 32) {
4999 hw_rx_index = (sw_rx_index /
5000 rx_ring_per_group) * 2 +
5001 (sw_rx_index % rx_ring_per_group);
5002 } else {
5003 hw_rx_index = (sw_rx_index /
5004 rx_ring_per_group) * 4 +
5005 (sw_rx_index % rx_ring_per_group);
5006 }
5007 return (hw_rx_index);
5008
5009 default:
5010 break;
5011 }
5012 }
5013
5014 	/*
5015 	 * Should never be reached; the return just keeps the compiler happy.
5016 	 */
5017 return (sw_rx_index);
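/*
 * Illustrative worked example for the mapping above, not in the driver:
 * with IXGBE_CLASSIFY_VMDQ_RSS, 8 rx rings in 4 rx groups
 * (rx_ring_per_group = 2), software ring 5 sits in group 2 at offset 1.
 * On 82598 it maps to hardware ring 2 * 16 + 1 = 33; on 82599/X540 with
 * num_rx_groups <= 32 it maps to 2 * 4 + 1 = 9, and with more than 32
 * groups to 2 * 2 + 1 = 5.
 */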
5082 */
5083 static void
5084 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5085 {
5086 struct ixgbe_hw *hw = &ixgbe->hw;
5087 ixgbe_intr_vector_t *vect; /* vector bitmap */
5088 int r_idx; /* ring index */
5089 int v_idx; /* vector index */
5090 uint32_t hw_index;
5091
5092 /*
5093 * Clear any previous entries
5094 */
5095 switch (hw->mac.type) {
5096 case ixgbe_mac_82598EB:
5097 for (v_idx = 0; v_idx < 25; v_idx++)
5098 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5099 break;
5100
5101 case ixgbe_mac_82599EB:
5102 case ixgbe_mac_X540:
5103 for (v_idx = 0; v_idx < 64; v_idx++)
5104 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5105 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5106 break;
5107
5108 default:
5109 break;
5110 }
5111
5112 	/*
5113 	 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
5114 	 * tx rings[0] will use RTxQ[1].
5115 	 */
5116 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5117 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5118 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5119 return;
5120 }
5121
5122 /*