1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
  14  * Copyright 2019 Joyent, Inc.
  15  * Copyright 2017 Tegile Systems, Inc.  All rights reserved.
  16  * Copyright 2020 RackTop Systems, Inc.
  17  * Copyright 2020 Ryan Zezeski
  18  * Copyright 2021 Oxide Computer Company
  19  */
  20 
  21 /*
  22  * i40e - Intel 10/40 Gb Ethernet driver
  23  *
  24  * The i40e driver is the main software device driver for the Intel 40 Gb family
  25  * of devices. Note that these devices come in many flavors with both 40 GbE
  26  * ports and 10 GbE ports. This device is the successor to the 82599 family of
  27  * devices (ixgbe).
  28  *
  29  * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
  30  * devices defined in the XL710 controller (previously known as Fortville) are a
  31  * rather different beast and have a small switch embedded inside of them. In
  32  * addition, the way that most of the programming is done has been overhauled.
  33  * As opposed to just using PCIe memory mapped registers, it also has an
  34  * administrative queue which is used to communicate with firmware running on
  35  * the chip.
  36  *
  37  * Each physical function in the hardware shows up as a device that this driver
  38  * will bind to. The hardware splits many resources evenly across all of the
  39  * physical functions present on the device, while other resources are instead
 * shared across the entire card and it's up to the device driver to
  41  * intelligently partition them.
  42  *
  43  * ------------
  44  * Organization
  45  * ------------
  46  *
  47  * This driver is made up of several files which have their own theory
  48  * statements spread across them. We'll touch on the high level purpose of each
  49  * file here, and then we'll get into more discussion on how the device is
  50  * generally modelled with respect to the interfaces in illumos.
  51  *
  52  * i40e_gld.c: This file contains all of the bindings to MAC and the networking
  53  *             stack.
  54  *
  55  * i40e_intr.c: This file contains all of the interrupt service routines and
  56  *              contains logic to enable and disable interrupts on the hardware.
  57  *              It also contains the logic to map hardware resources such as the
  58  *              rings to and from interrupts and controls their ability to fire.
  59  *
  60  *              There is a big theory statement on interrupts present there.
  61  *
  62  * i40e_main.c: The file that you're currently in. It interfaces with the
  63  *              traditional OS DDI interfaces and is in charge of configuring
  64  *              the device.
  65  *
  66  * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
  67  *                  work with Intel's common code for the device.
  68  *
  69  * i40e_stats.c: This file contains the general work and logic around our
  70  *               kstats. A theory statement on their organization and use of the
  71  *               hardware exists there.
  72  *
  73  * i40e_sw.h: This header file contains all of the primary structure definitions
  74  *            and constants that are used across the entire driver.
  75  *
  76  * i40e_transceiver.c: This file contains all of the logic for sending and
  77  *                     receiving data. It contains all of the ring and DMA
  78  *                     allocation logic, as well as, the actual interfaces to
  79  *                     send and receive data.
  80  *
  81  *                     A big theory statement on ring management, descriptors,
  82  *                     and how it ties into the OS is present there.
  83  *
  84  * --------------
  85  * General Design
  86  * --------------
  87  *
  88  * Before we go too far into the general way we've laid out data structures and
  89  * the like, it's worth taking some time to explain how the hardware is
  90  * organized. This organization informs a lot of how we do things at this time
  91  * in the driver.
  92  *
 * Each physical device consists of one or more ports, which are
  94  * considered physical functions in the PCI sense and thus each get enumerated
  95  * by the system, resulting in an instance being created and attached to. While
  96  * there are many resources that are unique to each physical function eg.
  97  * instance of the device, there are many that are shared across all of them.
  98  * Several resources have an amount reserved for each Virtual Station Interface
  99  * (VSI) and then a static pool of resources, available for all functions on the
 100  * card.
 101  *
 102  * The most important resource in hardware are its transmit and receive queue
 103  * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
 104  * parlance. There are a set number of these on each device; however, they are
 105  * statically partitioned among all of the different physical functions.
 106  *
 107  * 'Fortville' (the code name for this device family) is basically a switch. To
 108  * map MAC addresses and other things to queues, we end up having to create
 109  * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
 110  * traffic to a queue. A VSI owns a collection of queues and has a series of
 111  * forwarding rules that point to it. One way to think of this is to treat it
 112  * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
 113  * classification resources, that is a VSI in i40e.
 114  *
 115  * The sets of VSIs is shared across the entire device, though there may be some
 116  * amount that are reserved to each PF. Because the GLDv3 does not let us change
 117  * the number of groups dynamically, we instead statically divide this amount
 118  * evenly between all the functions that exist. In addition, we have the same
 119  * problem with the mac address forwarding rules. There are a static number that
 120  * exist shared across all the functions.
 121  *
 122  * To handle both of these resources, what we end up doing is going through and
 123  * determining which functions belong to the same device. Nominally one might do
 124  * this by having a nexus driver; however, a prime requirement for a nexus
 125  * driver is identifying the various children and activating them. While it is
 126  * possible to get this information from NVRAM, we would end up duplicating a
 127  * lot of the PCI enumeration logic. Really, at the end of the day, the device
 128  * doesn't give us the traditional identification properties we want from a
 129  * nexus driver.
 130  *
 131  * Instead, we rely on some properties that are guaranteed to be unique. While
 132  * it might be tempting to leverage the PBA or serial number of the device from
 133  * NVRAM, there is nothing that says that two devices can't be mis-programmed to
 134  * have the same values in NVRAM. Instead, we uniquely identify a group of
 135  * functions based on their parent in the /devices tree, their PCI bus and PCI
 136  * function identifiers. Using either on their own may not be sufficient.
 137  *
 138  * For each unique PCI device that we encounter, we'll create a i40e_device_t.
 139  * From there, because we don't have a good way to tell the GLDv3 about sharing
 140  * resources between everything, we'll end up just dividing the resources
 141  * evenly between all of the functions. Longer term, if we don't have to declare
 142  * to the GLDv3 that these resources are shared, then we'll maintain a pool and
 143  * have each PF allocate from the pool in the device, thus if only two of four
 144  * ports are being used, for example, then all of the resources can still be
 145  * used.
 146  *
 147  * -------------------------------------------
 148  * Transmit and Receive Queue Pair Allocations
 149  * -------------------------------------------
 150  *
 151  * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
 152  * queue pairs, we have no way of modifying it, only observing it. From there,
 153  * it's up to us to map these queues to VSIs and VFs. Since we don't support any
 154  * VFs at this time, we only focus on assignments to VSIs.
 155  *
 * At the moment, we use a static mapping of transmit/receive queue pairs to a
 157  * given VSI (eg. rings to a group). Though in the fullness of time, we want to
 158  * make this something which is fully dynamic and take advantage of documented,
 159  * but not yet available functionality for adding filters based on VXLAN and
 160  * other encapsulation technologies.
 161  *
 162  * -------------------------------------
 163  * Broadcast, Multicast, and Promiscuous
 164  * -------------------------------------
 165  *
 166  * As part of the GLDv3, we need to make sure that we can handle receiving
 167  * broadcast and multicast traffic. As well as enabling promiscuous mode when
 168  * requested. GLDv3 requires that all broadcast and multicast traffic be
 169  * retrieved by the default group, eg. the first one. This is the same thing as
 170  * the default VSI.
 171  *
 * To receive broadcast traffic, we enable it through the admin queue, rather
 173  * than use one of our filters for it. For multicast traffic, we reserve a
 174  * certain number of the hash filters and assign them to a given PF. When we
 175  * exceed those, we then switch to using promiscuous mode for multicast traffic.
 176  *
 177  * More specifically, once we exceed the number of filters (indicated because
 178  * the i40e_t`i40e_resources.ifr_nmcastfilt ==
 179  * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
 180  * promiscuous mode. If promiscuous mode is toggled then we keep track of the
 181  * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
 182  * That will stay enabled until that count reaches zero indicating that we have
 183  * only added multicast addresses that we have a corresponding entry for.
 184  *
 185  * Because MAC itself wants to toggle promiscuous mode, which includes both
 186  * unicast and multicast traffic, we go through and keep track of that
 187  * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
 188  * member.
 189  *
 190  * --------------
 191  * VSI Management
 192  * --------------
 193  *
 194  * The PFs share 384 VSIs. The firmware creates one VSI per PF by default.
 195  * During chip start we retrieve the SEID of this VSI and assign it as the
 196  * default VSI for our VEB (one VEB per PF). We then add additional VSIs to
 197  * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups.
 198  * We currently cap this number to I40E_GROUP_MAX to a) make sure all PFs can
 199  * allocate the same number of VSIs, and b) to keep the interrupt multiplexing
 200  * under control. In the future, when we improve the interrupt allocation, we
 201  * may want to revisit this cap to make better use of the available VSIs. The
 202  * VSI allocation and configuration can be found in i40e_chip_start().
 203  *
 204  * ----------------
 205  * Structure Layout
 206  * ----------------
 207  *
 208  * The following images relates the core data structures together. The primary
 209  * structure in the system is the i40e_t. It itself contains multiple rings,
 210  * i40e_trqpair_t's which contain the various transmit and receive data. The
 211  * receive data is stored outside of the i40e_trqpair_t and instead in the
 212  * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps
 213  * track of per-physical device state. Finally, for every active descriptor,
 214  * there is a corresponding control block, which is where the
 215  * i40e_rx_control_block_t and the i40e_tx_control_block_t come from.
 216  *
 217  *   +-----------------------+       +-----------------------+
 218  *   | Global i40e_t list    |       | Global Device list    |
 219  *   |                       |    +--|                       |
 220  *   | i40e_glist            |    |  | i40e_dlist            |
 221  *   +-----------------------+    |  +-----------------------+
 222  *       |                        v
 223  *       |      +------------------------+      +-----------------------+
 224  *       |      | Device-wide Structure  |----->| Device-wide Structure |--> ...
 225  *       |      | i40e_device_t          |      | i40e_device_t         |
 226  *       |      |                        |      +-----------------------+
 227  *       |      | dev_info_t *     ------+--> Parent in devices tree.
 228  *       |      | uint_t           ------+--> PCI bus number
 229  *       |      | uint_t           ------+--> PCI device number
 230  *       |      | uint_t           ------+--> Number of functions
 231  *       |      | i40e_switch_rsrcs_t ---+--> Captured total switch resources
 232  *       |      | list_t           ------+-------------+
 233  *       |      +------------------------+             |
 234  *       |                           ^                 |
 235  *       |                           +--------+        |
 236  *       |                                    |        v
 237  *       |  +---------------------------+     |   +-------------------+
 238  *       +->| GLDv3 Device, per PF      |-----|-->| GLDv3 Device (PF) |--> ...
 239  *          | i40e_t                    |     |   | i40e_t            |
 240  *          | **Primary Structure**     |     |   +-------------------+
 241  *          |                           |     |
 242  *          | i40e_device_t *         --+-----+
 243  *          | i40e_state_t            --+---> Device State
 244  *          | i40e_hw_t               --+---> Intel common code structure
 245  *          | mac_handle_t            --+---> GLDv3 handle to MAC
 246  *          | ddi_periodic_t          --+---> Link activity timer
 247  *          | i40e_vsi_t *            --+---> Array of VSIs
 248  *          | i40e_func_rsrc_t        --+---> Available hardware resources
 249  *          | i40e_switch_rsrc_t *    --+---> Switch resource snapshot
 250  *          | i40e_sdu                --+---> Current MTU
 251  *          | i40e_frame_max          --+---> Current HW frame size
 252  *          | i40e_uaddr_t *          --+---> Array of assigned unicast MACs
 253  *          | i40e_maddr_t *          --+---> Array of assigned multicast MACs
 254  *          | i40e_mcast_promisccount --+---> Active multicast state
 255  *          | i40e_promisc_on         --+---> Current promiscuous mode state
 256  *          | uint_t                  --+---> Number of transmit/receive pairs
 257  *          | i40e_rx_group_t *       --+---> Array of Rx groups
 258  *          | kstat_t *               --+---> PF kstats
 259  *          | i40e_pf_stats_t         --+---> PF kstat backing data
 260  *          | i40e_trqpair_t *        --+---------+
 261  *          +---------------------------+         |
 262  *                                                |
 263  *                                                v
 264  *  +-------------------------------+       +-----------------------------+
 265  *  | Transmit/Receive Queue Pair   |-------| Transmit/Receive Queue Pair |->...
 266  *  | i40e_trqpair_t                |       | i40e_trqpair_t              |
 267  *  + Ring Data Structure           |       +-----------------------------+
 268  *  |                               |
 269  *  | mac_ring_handle_t             +--> MAC RX ring handle
 270  *  | mac_ring_handle_t             +--> MAC TX ring handle
 271  *  | i40e_rxq_stat_t             --+--> RX Queue stats
 272  *  | i40e_txq_stat_t             --+--> TX Queue stats
 273  *  | uint32_t (tx ring size)       +--> TX Ring Size
 274  *  | uint32_t (tx free list size)  +--> TX Free List Size
 275  *  | i40e_dma_buffer_t     --------+--> TX Descriptor ring DMA
 276  *  | i40e_tx_desc_t *      --------+--> TX descriptor ring
 *  | volatile uint32_t *           +--> TX Write back head
 278  *  | uint32_t               -------+--> TX ring head
 279  *  | uint32_t               -------+--> TX ring tail
 280  *  | uint32_t               -------+--> Num TX desc free
 281  *  | i40e_tx_control_block_t *   --+--> TX control block array  ---+
 282  *  | i40e_tx_control_block_t **  --+--> TCB work list          ----+
 283  *  | i40e_tx_control_block_t **  --+--> TCB free list           ---+
 284  *  | uint32_t               -------+--> Free TCB count             |
 285  *  | i40e_rx_data_t *       -------+--+                            v
 286  *  +-------------------------------+  |          +---------------------------+
 287  *                                     |          | Per-TX Frame Metadata     |
 288  *                                     |          | i40e_tx_control_block_t   |
 289  *                +--------------------+          |                           |
 290  *                |           mblk to transmit <--+---      mblk_t *          |
 291  *                |           type of transmit <--+---      i40e_tx_type_t    |
 292  *                |              TX DMA handle <--+---      ddi_dma_handle_t  |
 293  *                v              TX DMA buffer <--+---      i40e_dma_buffer_t |
 294  *    +------------------------------+            +---------------------------+
 295  *    | Core Receive Data            |
 296  *    | i40e_rx_data_t               |
 297  *    |                              |
 298  *    | i40e_dma_buffer_t          --+--> RX descriptor DMA Data
 299  *    | i40e_rx_desc_t             --+--> RX descriptor ring
 300  *    | uint32_t                   --+--> Next free desc.
 301  *    | i40e_rx_control_block_t *  --+--> RX Control Block Array  ---+
 302  *    | i40e_rx_control_block_t ** --+--> RCB work list           ---+
 303  *    | i40e_rx_control_block_t ** --+--> RCB free list           ---+
 304  *    +------------------------------+                               |
 305  *                ^                                                  |
 306  *                |     +---------------------------+                |
 307  *                |     | Per-RX Frame Metadata     |<---------------+
 308  *                |     | i40e_rx_control_block_t   |
 309  *                |     |                           |
 310  *                |     | mblk_t *              ----+--> Received mblk_t data
 311  *                |     | uint32_t              ----+--> Reference count
 312  *                |     | i40e_dma_buffer_t     ----+--> Receive data DMA info
 313  *                |     | frtn_t                ----+--> mblk free function info
 314  *                +-----+-- i40e_rx_data_t *        |
 315  *                      +---------------------------+
 316  *
 317  * -------------
 318  * Lock Ordering
 319  * -------------
 320  *
 321  * In order to ensure that we don't deadlock, the following represents the
 322  * lock order being used. When grabbing locks, follow the following order. Lower
 323  * numbers are more important. Thus, the i40e_glock which is number 0, must be
 324  * taken before any other locks in the driver. On the other hand, the
 325  * i40e_t`i40e_stat_lock, has the highest number because it's the least
 326  * important lock. Note, that just because one lock is higher than another does
 327  * not mean that all intermediary locks are required.
 328  *
 329  * 0) i40e_glock
 330  * 1) i40e_t`i40e_general_lock
 331  *
 332  * 2) i40e_trqpair_t`itrq_rx_lock
 333  * 3) i40e_trqpair_t`itrq_tx_lock
 334  * 4) i40e_trqpair_t`itrq_intr_lock
 335  * 5) i40e_t`i40e_rx_pending_lock
 336  * 6) i40e_trqpair_t`itrq_tcb_lock
 337  *
 338  * 7) i40e_t`i40e_stat_lock
 339  *
 340  * Rules and expectations:
 341  *
 * 1) A thread holding locks belonging to one PF should not hold locks
 * belonging to a second. If for some reason this becomes necessary, locks
 * should be grabbed
 343  * a second. If for some reason this becomes necessary, locks should be grabbed
 344  * based on the list order in the i40e_device_t, which implies that the
 345  * i40e_glock is held.
 346  *
 347  * 2) When grabbing locks between multiple transmit and receive queues, the
 348  * locks for the lowest number transmit/receive queue should be grabbed first.
 349  *
 350  * 3) When grabbing both the transmit and receive lock for a given queue, always
 351  * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
 352  *
 353  * 4) The following pairs of locks are not expected to be held at the same time:
 354  *
 355  * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
 356  * o i40e_trqpair_t`itrq_intr_lock is not expected to be held with any
 357  *   other lock except i40e_t`i40e_general_lock in mc_start(9E) and
 *   mc_stop(9E).
 359  *
 360  * -----------
 361  * Future Work
 362  * -----------
 363  *
 364  * At the moment the i40e_t driver is rather bare bones, allowing us to start
 365  * getting data flowing and folks using it while we develop additional features.
 366  * While bugs have been filed to cover this future work, the following gives an
 367  * overview of expected work:
 368  *
 369  *  o DMA binding and breaking up the locking in ring recycling.
 370  *  o Enhanced detection of device errors
 371  *  o Participation in IRM
 372  *  o FMA device reset
 373  *  o Stall detection, temperature error detection, etc.
 374  *  o More dynamic resource pools
 375  */
 376 
 377 #include "i40e_sw.h"
 378 
 379 static char i40e_ident[] = "Intel 10/40Gb Ethern0t v1.0.3";
 380 
/*
 * The i40e_glock primarily protects the lists below and the i40e_device_t
 * structures.  It must be taken before any other lock in the driver (see the
 * "Lock Ordering" section above).
 */
static kmutex_t i40e_glock;
static list_t i40e_glist;	/* Global list of all i40e_t instances. */
static list_t i40e_dlist;	/* Global list of i40e_device_t structures. */
 388 
/*
 * Access attributes for register mapping.  Note that i40e_fm_init() adjusts
 * devacc_attr_access based on the "fm_capable" property before the registers
 * are actually mapped.
 */
static ddi_device_acc_attr_t i40e_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
 398 
 399 /*
 400  * Logging function for this driver.
 401  */
 402 static void
 403 i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
 404     va_list ap)
 405 {
 406         char buf[1024];
 407 
 408         (void) vsnprintf(buf, sizeof (buf), fmt, ap);
 409 
 410         if (i40e == NULL) {
 411                 cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
 412                     I40E_MODULE_NAME, buf);
 413         } else {
 414                 dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
 415                     buf);
 416         }
 417 }
 418 
/*
 * Because there's the stupid trailing-comma problem with the C preprocessor
 * and variable arguments, I need to instantiate these.  Pardon the redundant
 * code.
 */
/*PRINTFLIKE2*/
/*
 * Log a warning (CE_WARN) against the given instance; i40e may be NULL,
 * in which case the message is attributed to the module as a whole.
 * Log-only: does not force output to the console.
 */
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
	va_end(ap);
}
 434 
/*PRINTFLIKE2*/
/*
 * Log an informational note (CE_NOTE) against the given instance (or the
 * module, when i40e is NULL).  Log-only: does not go to the console.
 */
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
	va_end(ap);
}
 445 
/*PRINTFLIKE2*/
/*
 * Like i40e_log(), but also directed to the system console (console set to
 * B_TRUE), for messages an administrator should see immediately.
 */
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
	va_end(ap);
}
 456 
 457 /*
 458  * Various parts of the driver need to know if the controller is from the X722
 459  * family, which has a few additional capabilities and different programming
 460  * means. We don't consider virtual functions as part of this as they are quite
 461  * different and will require substantially more work.
 462  */
 463 static boolean_t
 464 i40e_is_x722(i40e_t *i40e)
 465 {
 466         return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722);
 467 }
 468 
 469 static void
 470 i40e_device_rele(i40e_t *i40e)
 471 {
 472         i40e_device_t *idp = i40e->i40e_device;
 473 
 474         if (idp == NULL)
 475                 return;
 476 
 477         mutex_enter(&i40e_glock);
 478         VERIFY(idp->id_nreg > 0);
 479         list_remove(&idp->id_i40e_list, i40e);
 480         idp->id_nreg--;
 481         if (idp->id_nreg == 0) {
 482                 list_remove(&i40e_dlist, idp);
 483                 list_destroy(&idp->id_i40e_list);
 484                 kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
 485                     idp->id_rsrcs_alloc);
 486                 kmem_free(idp, sizeof (i40e_device_t));
 487         }
 488         i40e->i40e_device = NULL;
 489         mutex_exit(&i40e_glock);
 490 }
 491 
/*
 * Find (or create) the i40e_device_t shared by all functions of the same
 * physical card, identified by the parent dev_info_t together with the PCI
 * bus and device numbers, and register this i40e_t against it.  The caller's
 * instance is appended to the device's per-function list.  The entire lookup
 * and creation is performed under i40e_glock, which protects the global
 * device list and all i40e_device_t state.
 */
static i40e_device_t *
i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
{
	i40e_device_t *idp;
	mutex_enter(&i40e_glock);
	for (idp = list_head(&i40e_dlist); idp != NULL;
	    idp = list_next(&i40e_dlist, idp)) {
		if (idp->id_parent == parent && idp->id_pci_bus == bus &&
		    idp->id_pci_device == device) {
			break;
		}
	}

	if (idp != NULL) {
		/* Existing device: just take another registration hold. */
		VERIFY(idp->id_nreg < idp->id_nfuncs);
		idp->id_nreg++;
	} else {
		i40e_hw_t *hw = &i40e->i40e_hw_space;
		ASSERT(hw->num_ports > 0);
		ASSERT(hw->num_partitions > 0);

		/*
		 * The Intel common code doesn't exactly keep the number of PCI
		 * functions. But it calculates it during discovery of
		 * partitions and ports. So what we do is undo the calculation
		 * that it does originally, as functions are evenly spread
		 * across ports in the rare case of partitions.
		 */
		idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
		idp->id_parent = parent;
		idp->id_pci_bus = bus;
		idp->id_pci_device = device;
		idp->id_nfuncs = hw->num_ports * hw->num_partitions;
		idp->id_nreg = 1;
		idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
		idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
		idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc, KM_SLEEP);
		/* Snapshot this function's view of the switch resources. */
		bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
		    sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
		list_create(&idp->id_i40e_list, sizeof (i40e_t),
		    offsetof(i40e_t, i40e_dlink));

		list_insert_tail(&i40e_dlist, idp);
	}

	list_insert_tail(&idp->id_i40e_list, i40e);
	mutex_exit(&i40e_glock);

	return (idp);
}
 543 
 544 static void
 545 i40e_link_state_set(i40e_t *i40e, link_state_t state)
 546 {
 547         if (i40e->i40e_link_state == state)
 548                 return;
 549 
 550         i40e->i40e_link_state = state;
 551         mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
 552 }
 553 
 554 /*
 555  * This is a basic link check routine. Mostly we're using this just to see
 556  * if we can get any accurate information about the state of the link being
 557  * up or down, as well as updating the link state, speed, etc. information.
 558  */
 559 void
 560 i40e_link_check(i40e_t *i40e)
 561 {
 562         i40e_hw_t *hw = &i40e->i40e_hw_space;
 563         boolean_t ls;
 564         int ret;
 565 
 566         ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
 567 
 568         hw->phy.get_link_info = B_TRUE;
 569         if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
 570                 i40e->i40e_s_link_status_errs++;
 571                 i40e->i40e_s_link_status_lasterr = ret;
 572                 return;
 573         }
 574 
 575         /*
 576          * Firmware abstracts all of the mac and phy information for us, so we
 577          * can use i40e_get_link_status to determine the current state.
 578          */
 579         if (ls == B_TRUE) {
 580                 enum i40e_aq_link_speed speed;
 581 
 582                 speed = i40e_get_link_speed(hw);
 583 
 584                 /*
 585                  * Translate from an i40e value to a value in Mbits/s.
 586                  */
 587                 switch (speed) {
 588                 case I40E_LINK_SPEED_100MB:
 589                         i40e->i40e_link_speed = 100;
 590                         break;
 591                 case I40E_LINK_SPEED_1GB:
 592                         i40e->i40e_link_speed = 1000;
 593                         break;
 594                 case I40E_LINK_SPEED_2_5GB:
 595                         i40e->i40e_link_speed = 2500;
 596                         break;
 597                 case I40E_LINK_SPEED_5GB:
 598                         i40e->i40e_link_speed = 5000;
 599                         break;
 600                 case I40E_LINK_SPEED_10GB:
 601                         i40e->i40e_link_speed = 10000;
 602                         break;
 603                 case I40E_LINK_SPEED_20GB:
 604                         i40e->i40e_link_speed = 20000;
 605                         break;
 606                 case I40E_LINK_SPEED_40GB:
 607                         i40e->i40e_link_speed = 40000;
 608                         break;
 609                 case I40E_LINK_SPEED_25GB:
 610                         i40e->i40e_link_speed = 25000;
 611                         break;
 612                 default:
 613                         i40e->i40e_link_speed = 0;
 614                         break;
 615                 }
 616 
 617                 /*
 618                  * At this time, hardware does not support half-duplex
 619                  * operation, hence why we don't ask the hardware about our
 620                  * current speed.
 621                  */
 622                 i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
 623                 i40e_link_state_set(i40e, LINK_STATE_UP);
 624         } else {
 625                 i40e->i40e_link_speed = 0;
 626                 i40e->i40e_link_duplex = 0;
 627                 i40e_link_state_set(i40e, LINK_STATE_DOWN);
 628         }
 629 }
 630 
 631 static void
 632 i40e_rem_intrs(i40e_t *i40e)
 633 {
 634         int i, rc;
 635 
 636         for (i = 0; i < i40e->i40e_intr_count; i++) {
 637                 rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
 638                 if (rc != DDI_SUCCESS) {
 639                         i40e_log(i40e, "failed to free interrupt %d: %d",
 640                             i, rc);
 641                 }
 642         }
 643 
 644         kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
 645         i40e->i40e_intr_handles = NULL;
 646 }
 647 
 648 static void
 649 i40e_rem_intr_handlers(i40e_t *i40e)
 650 {
 651         int i, rc;
 652 
 653         for (i = 0; i < i40e->i40e_intr_count; i++) {
 654                 rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
 655                 if (rc != DDI_SUCCESS) {
 656                         i40e_log(i40e, "failed to remove interrupt %d: %d",
 657                             i, rc);
 658                 }
 659         }
 660 }
 661 
 662 /*
 663  * illumos Fault Management Architecture (FMA) support.
 664  */
 665 
 666 int
 667 i40e_check_acc_handle(ddi_acc_handle_t handle)
 668 {
 669         ddi_fm_error_t de;
 670 
 671         ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
 672         ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
 673         return (de.fme_status);
 674 }
 675 
 676 int
 677 i40e_check_dma_handle(ddi_dma_handle_t handle)
 678 {
 679         ddi_fm_error_t de;
 680 
 681         ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
 682         return (de.fme_status);
 683 }
 684 
 685 /*
 686  * Fault service error handling callback function.
 687  */
 688 /* ARGSUSED */
 689 static int
 690 i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
 691 {
 692         pci_ereport_post(dip, err, NULL);
 693         return (err->fme_status);
 694 }
 695 
/*
 * Determine which FMA services this instance should register for (from
 * the "fm_capable" property, defaulting to everything), then configure
 * register-access checking, DMA checking, ereports, and the error
 * callback accordingly.  Note that this mutates the file-global
 * i40e_regs_acc_attr, so it must run before registers are mapped.
 */
static void
i40e_fm_init(i40e_t *i40e)
{
	ddi_iblock_cookie_t iblk;

	/*
	 * Read the property, defaulting to all four capabilities when it
	 * is absent.
	 */
	i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
	    i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	/*
	 * Clamp out-of-range property values: negative means no FMA at
	 * all, anything beyond the four defined bits means everything.
	 */
	if (i40e->i40e_fm_capabilities < 0) {
		i40e->i40e_fm_capabilities = 0;
	} else if (i40e->i40e_fm_capabilities > 0xf) {
		i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
		    DDI_FM_ERRCB_CAPABLE;
	}

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (i40e->i40e_fm_capabilities) {
		ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);

		/* Ereport setup is needed by both ereports and the callback. */
		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			pci_ereport_setup(i40e->i40e_dip);
		}

		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			ddi_fm_handler_register(i40e->i40e_dip,
			    i40e_fm_error_cb, (void*)i40e);
		}
	}

	/* Use error-reporting DMA attributes only when DMA checking is on. */
	if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		i40e_init_dma_attrs(i40e, B_TRUE);
	} else {
		i40e_init_dma_attrs(i40e, B_FALSE);
	}
}
 743 
 744 static void
 745 i40e_fm_fini(i40e_t *i40e)
 746 {
 747         if (i40e->i40e_fm_capabilities) {
 748 
 749                 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
 750                     DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
 751                         pci_ereport_teardown(i40e->i40e_dip);
 752 
 753                 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
 754                         ddi_fm_handler_unregister(i40e->i40e_dip);
 755 
 756                 ddi_fm_fini(i40e->i40e_dip);
 757         }
 758 }
 759 
 760 void
 761 i40e_fm_ereport(i40e_t *i40e, char *detail)
 762 {
 763         uint64_t ena;
 764         char buf[FM_MAX_CLASS];
 765 
 766         (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
 767         ena = fm_ena_generate(0, FM_ENA_FMT1);
 768         if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
 769                 ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
 770                     FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
 771         }
 772 }
 773 
 774 /*
 775  * Here we're trying to set the SEID of the default VSI. In general,
 776  * when we come through and look at this shortly after attach, we
 777  * expect there to only be a single element present, which is the
 778  * default VSI. Importantly, each PF seems to not see any other
 779  * devices, in part because of the simple switch mode that we're
 780  * using. If for some reason, we see more artifacts, we'll need to
 781  * revisit what we're doing here.
 782  */
 783 static boolean_t
 784 i40e_set_def_vsi_seid(i40e_t *i40e)
 785 {
 786         i40e_hw_t *hw = &i40e->i40e_hw_space;
 787         struct i40e_aqc_get_switch_config_resp *sw_config;
 788         uint8_t aq_buf[I40E_AQ_LARGE_BUF];
 789         uint16_t next = 0;
 790         int rc;
 791 
 792         /* LINTED: E_BAD_PTR_CAST_ALIGN */
 793         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
 794         rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
 795             NULL);
 796         if (rc != I40E_SUCCESS) {
 797                 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
 798                     rc, hw->aq.asq_last_status);
 799                 return (B_FALSE);
 800         }
 801 
 802         if (LE_16(sw_config->header.num_reported) != 1) {
 803                 i40e_error(i40e, "encountered multiple (%d) switching units "
 804                     "during attach, not proceeding",
 805                     LE_16(sw_config->header.num_reported));
 806                 return (B_FALSE);
 807         }
 808 
 809         I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid;
 810         return (B_TRUE);
 811 }
 812 
 813 /*
 814  * Get the SEID of the uplink MAC.
 815  */
 816 static int
 817 i40e_get_mac_seid(i40e_t *i40e)
 818 {
 819         i40e_hw_t *hw = &i40e->i40e_hw_space;
 820         struct i40e_aqc_get_switch_config_resp *sw_config;
 821         uint8_t aq_buf[I40E_AQ_LARGE_BUF];
 822         uint16_t next = 0;
 823         int rc;
 824 
 825         /* LINTED: E_BAD_PTR_CAST_ALIGN */
 826         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
 827         rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
 828             NULL);
 829         if (rc != I40E_SUCCESS) {
 830                 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
 831                     rc, hw->aq.asq_last_status);
 832                 return (-1);
 833         }
 834 
 835         return (LE_16(sw_config->element[0].uplink_seid));
 836 }
 837 
 838 /*
 839  * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
 840  * must also provide the memory for it; however, we don't need to keep it around
 841  * to the call to the common code. It takes it and parses it into an internal
 842  * structure.
 843  */
 844 static boolean_t
 845 i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
 846 {
 847         struct i40e_aqc_list_capabilities_element_resp *buf;
 848         int rc;
 849         size_t len;
 850         uint16_t needed;
 851         int nelems = I40E_HW_CAP_DEFAULT;
 852 
 853         len = nelems * sizeof (*buf);
 854 
 855         for (;;) {
 856                 ASSERT(len > 0);
 857                 buf = kmem_alloc(len, KM_SLEEP);
 858                 rc = i40e_aq_discover_capabilities(hw, buf, len,
 859                     &needed, i40e_aqc_opc_list_func_capabilities, NULL);
 860                 kmem_free(buf, len);
 861 
 862                 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
 863                     nelems == I40E_HW_CAP_DEFAULT) {
 864                         if (nelems == needed) {
 865                                 i40e_error(i40e, "Capability discovery failed "
 866                                     "due to byzantine common code");
 867                                 return (B_FALSE);
 868                         }
 869                         len = needed;
 870                         continue;
 871                 } else if (rc != I40E_SUCCESS ||
 872                     hw->aq.asq_last_status != I40E_AQ_RC_OK) {
 873                         i40e_error(i40e, "Capability discovery failed: %d", rc);
 874                         return (B_FALSE);
 875                 }
 876 
 877                 break;
 878         }
 879 
 880         return (B_TRUE);
 881 }
 882 
 883 /*
 884  * Obtain the switch's capabilities as seen by this PF and keep it around for
 885  * our later use.
 886  */
 887 static boolean_t
 888 i40e_get_switch_resources(i40e_t *i40e)
 889 {
 890         i40e_hw_t *hw = &i40e->i40e_hw_space;
 891         uint8_t cnt = 2;
 892         uint8_t act;
 893         size_t size;
 894         i40e_switch_rsrc_t *buf;
 895 
 896         for (;;) {
 897                 enum i40e_status_code ret;
 898                 size = cnt * sizeof (i40e_switch_rsrc_t);
 899                 ASSERT(size > 0);
 900                 if (size > UINT16_MAX)
 901                         return (B_FALSE);
 902                 buf = kmem_alloc(size, KM_SLEEP);
 903 
 904                 ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
 905                     cnt, NULL);
 906                 if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
 907                     hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
 908                         kmem_free(buf, size);
 909                         cnt += I40E_SWITCH_CAP_DEFAULT;
 910                         continue;
 911                 } else if (ret != I40E_SUCCESS) {
 912                         kmem_free(buf, size);
 913                         i40e_error(i40e,
 914                             "failed to retrieve switch statistics: %d", ret);
 915                         return (B_FALSE);
 916                 }
 917 
 918                 break;
 919         }
 920 
 921         i40e->i40e_switch_rsrc_alloc = cnt;
 922         i40e->i40e_switch_rsrc_actual = act;
 923         i40e->i40e_switch_rsrcs = buf;
 924 
 925         return (B_TRUE);
 926 }
 927 
 928 static void
 929 i40e_cleanup_resources(i40e_t *i40e)
 930 {
 931         if (i40e->i40e_uaddrs != NULL) {
 932                 kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
 933                     i40e->i40e_resources.ifr_nmacfilt);
 934                 i40e->i40e_uaddrs = NULL;
 935         }
 936 
 937         if (i40e->i40e_maddrs != NULL) {
 938                 kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
 939                     i40e->i40e_resources.ifr_nmcastfilt);
 940                 i40e->i40e_maddrs = NULL;
 941         }
 942 
 943         if (i40e->i40e_switch_rsrcs != NULL) {
 944                 size_t sz = sizeof (i40e_switch_rsrc_t) *
 945                     i40e->i40e_switch_rsrc_alloc;
 946                 ASSERT(sz > 0);
 947                 kmem_free(i40e->i40e_switch_rsrcs, sz);
 948                 i40e->i40e_switch_rsrcs = NULL;
 949         }
 950 
 951         if (i40e->i40e_device != NULL)
 952                 i40e_device_rele(i40e);
 953 }
 954 
 955 static boolean_t
 956 i40e_get_available_resources(i40e_t *i40e)
 957 {
 958         dev_info_t *parent;
 959         uint16_t bus, device, func;
 960         uint_t nregs;
 961         int *regs, i;
 962         i40e_device_t *idp;
 963         i40e_hw_t *hw = &i40e->i40e_hw_space;
 964 
 965         parent = ddi_get_parent(i40e->i40e_dip);
 966 
 967         if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
 968             &regs, &nregs) != DDI_PROP_SUCCESS) {
 969                 return (B_FALSE);
 970         }
 971 
 972         if (nregs < 1) {
 973                 ddi_prop_free(regs);
 974                 return (B_FALSE);
 975         }
 976 
 977         bus = PCI_REG_BUS_G(regs[0]);
 978         device = PCI_REG_DEV_G(regs[0]);
 979         func = PCI_REG_FUNC_G(regs[0]);
 980         ddi_prop_free(regs);
 981 
 982         i40e->i40e_hw_space.bus.func = func;
 983         i40e->i40e_hw_space.bus.device = device;
 984 
 985         if (i40e_get_switch_resources(i40e) == B_FALSE) {
 986                 return (B_FALSE);
 987         }
 988 
 989         /*
 990          * To calculate the total amount of a resource we have available, we
 991          * need to add how many our i40e_t thinks it has guaranteed, if any, and
 992          * then we need to go through and divide the number of available on the
 993          * device, which was snapshotted before anyone should have allocated
 994          * anything, and use that to derive how many are available from the
 995          * pool. Longer term, we may want to turn this into something that's
 996          * more of a pool-like resource that everything can share (though that
 997          * may require some more assistance from MAC).
 998          *
 999          * Though for transmit and receive queue pairs, we just have to ask
1000          * firmware instead.
1001          */
1002         idp = i40e_device_find(i40e, parent, bus, device);
1003         i40e->i40e_device = idp;
1004         i40e->i40e_resources.ifr_nvsis = 0;
1005         i40e->i40e_resources.ifr_nvsis_used = 0;
1006         i40e->i40e_resources.ifr_nmacfilt = 0;
1007         i40e->i40e_resources.ifr_nmacfilt_used = 0;
1008         i40e->i40e_resources.ifr_nmcastfilt = 0;
1009         i40e->i40e_resources.ifr_nmcastfilt_used = 0;
1010 
1011         for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
1012                 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1013 
1014                 switch (srp->resource_type) {
1015                 case I40E_AQ_RESOURCE_TYPE_VSI:
1016                         i40e->i40e_resources.ifr_nvsis +=
1017                             LE_16(srp->guaranteed);
1018                         i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
1019                         break;
1020                 case I40E_AQ_RESOURCE_TYPE_MACADDR:
1021                         i40e->i40e_resources.ifr_nmacfilt +=
1022                             LE_16(srp->guaranteed);
1023                         i40e->i40e_resources.ifr_nmacfilt_used =
1024                             LE_16(srp->used);
1025                         break;
1026                 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1027                         i40e->i40e_resources.ifr_nmcastfilt +=
1028                             LE_16(srp->guaranteed);
1029                         i40e->i40e_resources.ifr_nmcastfilt_used =
1030                             LE_16(srp->used);
1031                         break;
1032                 default:
1033                         break;
1034                 }
1035         }
1036 
1037         for (i = 0; i < idp->id_rsrcs_act; i++) {
1038                 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1039                 switch (srp->resource_type) {
1040                 case I40E_AQ_RESOURCE_TYPE_VSI:
1041                         i40e->i40e_resources.ifr_nvsis +=
1042                             LE_16(srp->total_unalloced) / idp->id_nfuncs;
1043                         break;
1044                 case I40E_AQ_RESOURCE_TYPE_MACADDR:
1045                         i40e->i40e_resources.ifr_nmacfilt +=
1046                             LE_16(srp->total_unalloced) / idp->id_nfuncs;
1047                         break;
1048                 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1049                         i40e->i40e_resources.ifr_nmcastfilt +=
1050                             LE_16(srp->total_unalloced) / idp->id_nfuncs;
1051                 default:
1052                         break;
1053                 }
1054         }
1055 
1056         i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
1057         i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;
1058 
1059         i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
1060             i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
1061         i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
1062             i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);
1063 
1064         /*
1065          * Initialize these as multicast addresses to indicate it's invalid for
1066          * sanity purposes. Think of it like 0xdeadbeef.
1067          */
1068         for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
1069                 i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;
1070 
1071         return (B_TRUE);
1072 }
1073 
1074 static boolean_t
1075 i40e_enable_interrupts(i40e_t *i40e)
1076 {
1077         int i, rc;
1078 
1079         if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1080                 rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
1081                     i40e->i40e_intr_count);
1082                 if (rc != DDI_SUCCESS) {
1083                         i40e_error(i40e, "Interrupt block-enable failed: %d",
1084                             rc);
1085                         return (B_FALSE);
1086                 }
1087         } else {
1088                 for (i = 0; i < i40e->i40e_intr_count; i++) {
1089                         rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
1090                         if (rc != DDI_SUCCESS) {
1091                                 i40e_error(i40e,
1092                                     "Failed to enable interrupt %d: %d", i, rc);
1093                                 while (--i >= 0) {
1094                                         (void) ddi_intr_disable(
1095                                             i40e->i40e_intr_handles[i]);
1096                                 }
1097                                 return (B_FALSE);
1098                         }
1099                 }
1100         }
1101 
1102         return (B_TRUE);
1103 }
1104 
1105 static boolean_t
1106 i40e_disable_interrupts(i40e_t *i40e)
1107 {
1108         int i, rc;
1109 
1110         if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111                 rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
1112                     i40e->i40e_intr_count);
1113                 if (rc != DDI_SUCCESS) {
1114                         i40e_error(i40e,
1115                             "Interrupt block-disabled failed: %d", rc);
1116                         return (B_FALSE);
1117                 }
1118         } else {
1119                 for (i = 0; i < i40e->i40e_intr_count; i++) {
1120                         rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
1121                         if (rc != DDI_SUCCESS) {
1122                                 i40e_error(i40e,
1123                                     "Failed to disable interrupt %d: %d",
1124                                     i, rc);
1125                                 return (B_FALSE);
1126                         }
1127                 }
1128         }
1129 
1130         return (B_TRUE);
1131 }
1132 
1133 /*
1134  * Free receive & transmit rings.
1135  */
1136 static void
1137 i40e_free_trqpairs(i40e_t *i40e)
1138 {
1139         i40e_trqpair_t *itrq;
1140 
1141         if (i40e->i40e_rx_groups != NULL) {
1142                 kmem_free(i40e->i40e_rx_groups,
1143                     sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups);
1144                 i40e->i40e_rx_groups = NULL;
1145         }
1146 
1147         if (i40e->i40e_trqpairs != NULL) {
1148                 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1149                         itrq = &i40e->i40e_trqpairs[i];
1150                         mutex_destroy(&itrq->itrq_intr_lock);
1151                         mutex_destroy(&itrq->itrq_rx_lock);
1152                         mutex_destroy(&itrq->itrq_tx_lock);
1153                         mutex_destroy(&itrq->itrq_tcb_lock);
1154                         cv_destroy(&itrq->itrq_intr_cv);
1155                         cv_destroy(&itrq->itrq_tx_cv);
1156 
1157                         i40e_stats_trqpair_fini(itrq);
1158                 }
1159 
1160                 kmem_free(i40e->i40e_trqpairs,
1161                     sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
1162                 i40e->i40e_trqpairs = NULL;
1163         }
1164 
1165         cv_destroy(&i40e->i40e_rx_pending_cv);
1166         mutex_destroy(&i40e->i40e_rx_pending_lock);
1167         mutex_destroy(&i40e->i40e_general_lock);
1168 }
1169 
1170 /*
1171  * Allocate transmit and receive rings, as well as other data structures that we
1172  * need.
1173  */
1174 static boolean_t
1175 i40e_alloc_trqpairs(i40e_t *i40e)
1176 {
1177         void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);
1178 
1179         /*
1180          * Now that we have the priority for the interrupts, initialize
1181          * all relevant locks.
1182          */
1183         mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
1184         mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
1185         cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);
1186 
1187         i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
1188             i40e->i40e_num_trqpairs, KM_SLEEP);
1189         for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1190                 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
1191 
1192                 itrq->itrq_i40e = i40e;
1193                 mutex_init(&itrq->itrq_intr_lock, NULL, MUTEX_DRIVER, mutexpri);
1194                 mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
1195                 mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
1196                 mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
1197                 cv_init(&itrq->itrq_intr_cv, NULL, CV_DRIVER, NULL);
1198                 cv_init(&itrq->itrq_tx_cv, NULL, CV_DRIVER, NULL);
1199                 itrq->itrq_index = i;
1200                 itrq->itrq_intr_quiesce = B_TRUE;
1201                 itrq->itrq_tx_quiesce = B_TRUE;
1202         }
1203 
1204         for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1205                 /*
1206                  * Keeping this in a separate iteration makes the
1207                  * clean up path safe.
1208                  */
1209                 if (!i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i])) {
1210                         i40e_free_trqpairs(i40e);
1211                         return (B_FALSE);
1212                 }
1213         }
1214 
1215         i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) *
1216             i40e->i40e_num_rx_groups, KM_SLEEP);
1217 
1218         for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
1219                 i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i];
1220 
1221                 rxg->irg_index = i;
1222                 rxg->irg_i40e = i40e;
1223         }
1224 
1225         return (B_TRUE);
1226 }
1227 
1228 
1229 
1230 /*
1231  * Unless a .conf file already overrode i40e_t structure values, they will
1232  * be 0, and need to be set in conjunction with the now-available HW report.
1233  */
1234 /* ARGSUSED */
1235 static void
1236 i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
1237 {
1238         if (i40e->i40e_num_trqpairs_per_vsi == 0) {
1239                 if (i40e_is_x722(i40e)) {
1240                         i40e->i40e_num_trqpairs_per_vsi =
1241                             I40E_722_MAX_TC_QUEUES;
1242                 } else {
1243                         i40e->i40e_num_trqpairs_per_vsi =
1244                             I40E_710_MAX_TC_QUEUES;
1245                 }
1246         }
1247 
1248         if (i40e->i40e_num_rx_groups == 0) {
1249                 i40e->i40e_num_rx_groups = I40E_DEF_NUM_RX_GROUPS;
1250         }
1251 }
1252 
1253 /*
1254  * Free any resources required by, or setup by, the Intel common code.
1255  */
1256 static void
1257 i40e_common_code_fini(i40e_t *i40e)
1258 {
1259         i40e_hw_t *hw = &i40e->i40e_hw_space;
1260         int rc;
1261 
1262         rc = i40e_shutdown_lan_hmc(hw);
1263         if (rc != I40E_SUCCESS)
1264                 i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);
1265 
1266         rc = i40e_shutdown_adminq(hw);
1267         if (rc != I40E_SUCCESS)
1268                 i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
1269 }
1270 
1271 /*
1272  * Initialize and call Intel common-code routines, includes some setup
1273  * the common code expects from the driver.  Also prints on failure, so
1274  * the caller doesn't have to.
1275  */
1276 static boolean_t
1277 i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
1278 {
1279         int rc;
1280 
1281         i40e_clear_hw(hw);
1282         rc = i40e_pf_reset(hw);
1283         if (rc != 0) {
1284                 i40e_error(i40e, "failed to reset hardware: %d", rc);
1285                 i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
1286                 return (B_FALSE);
1287         }
1288 
1289         rc = i40e_init_shared_code(hw);
1290         if (rc != 0) {
1291                 i40e_error(i40e, "failed to initialize i40e core: %d", rc);
1292                 return (B_FALSE);
1293         }
1294 
1295         hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
1296         hw->aq.num_asq_entries =  I40E_DEF_ADMINQ_SIZE;
1297         hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
1298         hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;
1299 
1300         rc = i40e_init_adminq(hw);
1301         if (rc != 0) {
1302                 i40e_error(i40e, "failed to initialize firmware admin queue: "
1303                     "%d, potential firmware version mismatch", rc);
1304                 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
1305                 return (B_FALSE);
1306         }
1307 
1308         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1309             hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
1310                 i40e_log(i40e, "The driver for the device detected a newer "
1311                     "version of the NVM image (%d.%d) than expected (%d.%d).\n"
1312                     "Please install the most recent version of the network "
1313                     "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
1314                     I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
1315         } else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
1316             hw->aq.api_min_ver < (I40E_FW_MINOR_VERSION(hw) - 1)) {
1317                 i40e_log(i40e, "The driver for the device detected an older"
1318                     " version of the NVM image (%d.%d) than expected (%d.%d)."
1319                     "\nPlease update the NVM image.\n",
1320                     hw->aq.api_maj_ver, hw->aq.api_min_ver,
1321                     I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw) - 1);
1322         }
1323 
1324         i40e_clear_pxe_mode(hw);
1325 
1326         /*
1327          * We need to call this so that the common code can discover
1328          * capabilities of the hardware, which it uses throughout the rest.
1329          */
1330         if (!i40e_get_hw_capabilities(i40e, hw)) {
1331                 i40e_error(i40e, "failed to obtain hardware capabilities");
1332                 return (B_FALSE);
1333         }
1334 
1335         if (i40e_get_available_resources(i40e) == B_FALSE) {
1336                 i40e_error(i40e, "failed to obtain hardware resources");
1337                 return (B_FALSE);
1338         }
1339 
1340         i40e_hw_to_instance(i40e, hw);
1341 
1342         rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1343             hw->func_caps.num_rx_qp, 0, 0);
1344         if (rc != 0) {
1345                 i40e_error(i40e, "failed to initialize hardware memory cache: "
1346                     "%d", rc);
1347                 return (B_FALSE);
1348         }
1349 
1350         rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1351         if (rc != 0) {
1352                 i40e_error(i40e, "failed to configure hardware memory cache: "
1353                     "%d", rc);
1354                 return (B_FALSE);
1355         }
1356 
1357         (void) i40e_aq_stop_lldp(hw, TRUE, FALSE, NULL);
1358 
1359         rc = i40e_get_mac_addr(hw, hw->mac.addr);
1360         if (rc != I40E_SUCCESS) {
1361                 i40e_error(i40e, "failed to retrieve hardware mac address: %d",
1362                     rc);
1363                 return (B_FALSE);
1364         }
1365 
1366         rc = i40e_validate_mac_addr(hw->mac.addr);
1367         if (rc != 0) {
1368                 i40e_error(i40e, "failed to validate internal mac address: "
1369                     "%d", rc);
1370                 return (B_FALSE);
1371         }
1372         bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
1373         if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
1374             I40E_SUCCESS) {
1375                 i40e_error(i40e, "failed to retrieve port mac address: %d",
1376                     rc);
1377                 return (B_FALSE);
1378         }
1379 
1380         /*
1381          * We need to obtain the Default Virtual Station SEID (VSI)
1382          * before we can perform other operations on the device.
1383          */
1384         if (!i40e_set_def_vsi_seid(i40e)) {
1385                 i40e_error(i40e, "failed to obtain Default VSI SEID");
1386                 return (B_FALSE);
1387         }
1388 
1389         return (B_TRUE);
1390 }
1391 
/*
 * Tear down everything that attach set up, consulting the attach
 * progress flags so that only the stages which actually completed are
 * undone.  The order is roughly the reverse of attach and frees the
 * soft state last.
 */
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	/* Stop the periodic link-state timer before MAC goes away. */
	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT)
		ddi_ufm_fini(i40e->i40e_ufmh);

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	/* Finally, free the admin queue buffer and the soft state itself. */
	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}
1458 
1459 static boolean_t
1460 i40e_final_init(i40e_t *i40e)
1461 {
1462         i40e_hw_t *hw = &i40e->i40e_hw_space;
1463         struct i40e_osdep *osdep = OS_DEP(hw);
1464         uint8_t pbanum[I40E_PBANUM_STRLEN];
1465         enum i40e_status_code irc;
1466         char buf[I40E_DDI_PROP_LEN];
1467 
1468         pbanum[0] = '\0';
1469         irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
1470         if (irc != I40E_SUCCESS) {
1471                 i40e_log(i40e, "failed to read PBA string: %d", irc);
1472         } else {
1473                 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
1474                     "printed-board-assembly", (char *)pbanum);
1475         }
1476 
1477 #ifdef  DEBUG
1478         ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
1479             hw->aq.fw_min_ver) < sizeof (buf));
1480         ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
1481         ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
1482             hw->aq.api_min_ver) < sizeof (buf));
1483 #endif
1484 
1485         (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
1486             hw->aq.fw_min_ver);
1487         (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
1488             "firmware-version", buf);
1489         (void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
1490         (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
1491             "firmware-build", buf);
1492         (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
1493             hw->aq.api_min_ver);
1494         (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
1495             "api-version", buf);
1496 
1497         if (!i40e_set_hw_bus_info(hw))
1498                 return (B_FALSE);
1499 
1500         if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
1501                 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
1502                 return (B_FALSE);
1503         }
1504 
1505         return (B_TRUE);
1506 }
1507 
1508 static void
1509 i40e_identify_hardware(i40e_t *i40e)
1510 {
1511         i40e_hw_t *hw = &i40e->i40e_hw_space;
1512         struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
1513 
1514         hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
1515         hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
1516         hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
1517             PCI_CONF_REVID);
1518         hw->subsystem_device_id =
1519             pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
1520         hw->subsystem_vendor_id =
1521             pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);
1522 
1523         /*
1524          * Note that we set the hardware's bus information later on, in
1525          * i40e_get_available_resources(). The common code doesn't seem to
1526          * require that it be set in any ways, it seems to be mostly for
1527          * book-keeping.
1528          */
1529 }
1530 
1531 static boolean_t
1532 i40e_regs_map(i40e_t *i40e)
1533 {
1534         dev_info_t *devinfo = i40e->i40e_dip;
1535         i40e_hw_t *hw = &i40e->i40e_hw_space;
1536         struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
1537         off_t memsize;
1538         int ret;
1539 
1540         if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
1541             DDI_SUCCESS) {
1542                 i40e_error(i40e, "Used invalid register set to map PCIe regs");
1543                 return (B_FALSE);
1544         }
1545 
1546         if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
1547             (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
1548             &osdep->ios_reg_handle)) != DDI_SUCCESS) {
1549                 i40e_error(i40e, "failed to map device registers: %d", ret);
1550                 return (B_FALSE);
1551         }
1552 
1553         osdep->ios_reg_size = memsize;
1554         return (B_TRUE);
1555 }
1556 
1557 /*
1558  * Update parameters required when a new MTU has been configured.  Calculate the
1559  * maximum frame size, as well as, size our DMA buffers which we size in
1560  * increments of 1K.
1561  */
1562 void
1563 i40e_update_mtu(i40e_t *i40e)
1564 {
1565         uint32_t rx, tx;
1566 
1567         i40e->i40e_frame_max = i40e->i40e_sdu +
1568             sizeof (struct ether_vlan_header) + ETHERFCSL;
1569 
1570         rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
1571         i40e->i40e_rx_buf_size = ((rx >> 10) +
1572             ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1573 
1574         tx = i40e->i40e_frame_max;
1575         i40e->i40e_tx_buf_size = ((tx >> 10) +
1576             ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1577 }
1578 
1579 static int
1580 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
1581 {
1582         int val;
1583 
1584         val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
1585             prop, def);
1586         if (val > max)
1587                 val = max;
1588         if (val < min)
1589                 val = min;
1590         return (val);
1591 }
1592 
/*
 * Load all of the driver's tunables from the i40e.conf file (or their
 * built-in defaults), clamping each to its documented range, and then
 * derive the MTU-dependent sizes.  Called once at attach time.
 */
static void
i40e_init_properties(i40e_t *i40e)
{
	i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
	    I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);

	/*
	 * intr_force lets an administrator cap the interrupt types we will
	 * attempt; it is compared against in i40e_alloc_intrs().
	 */
	i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
	    I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);

	i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
	    I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
	    I40E_DEF_TX_RING_SIZE);
	/* Ring sizes must be a multiple of I40E_DESC_ALIGN; round up. */
	if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
		    I40E_DESC_ALIGN);
	}

	/*
	 * This must come after tx_ring_size is finalized above, because
	 * its upper bound is derived from the rounded ring size.
	 */
	i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
	    I40E_MIN_TX_BLOCK_THRESH,
	    i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
	    I40E_DEF_TX_BLOCK_THRESH);

	i40e->i40e_num_rx_groups = i40e_get_prop(i40e, "rx_num_groups",
	    I40E_MIN_NUM_RX_GROUPS, I40E_MAX_NUM_RX_GROUPS,
	    I40E_DEF_NUM_RX_GROUPS);

	i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
	    I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
	    I40E_DEF_RX_RING_SIZE);
	if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
		    I40E_DESC_ALIGN);
	}

	/* Cap on rx frames processed per interrupt. */
	i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
	    I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
	    I40E_DEF_RX_LIMIT_PER_INTR);

	/* Hardware checksum and LSO offload enables; all default to on. */
	i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	/*
	 * Size thresholds for the rx/tx data paths; presumably the cutoffs
	 * between copying frames and binding them for DMA — confirm against
	 * the rx/tx path code if relying on this.
	 */
	i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
	    I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
	    I40E_DEF_RX_DMA_THRESH);

	i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
	    I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
	    I40E_DEF_TX_DMA_THRESH);

	/* Interrupt throttling rates for tx, rx, and other causes. */
	i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);

	i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);

	i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);

	/*
	 * With multiple rings disabled, fall back to the single queue
	 * pair / single group configuration used without MSI-X.
	 */
	if (!i40e->i40e_mr_enable) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
		i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
	}

	i40e_update_mtu(i40e);
}
1667 
1668 /*
1669  * There are a few constraints on interrupts that we're currently imposing, some
1670  * of which are restrictions from hardware. For a fuller treatment, see
1671  * i40e_intr.c.
1672  *
1673  * Currently, to use MSI-X we require two interrupts be available though in
1674  * theory we should participate in IRM and happily use more interrupts.
1675  *
1676  * Hardware only supports a single MSI being programmed and therefore if we
1677  * don't have MSI-X interrupts available at this time, then we ratchet down the
1678  * number of rings and groups available. Obviously, we only bother with a single
1679  * fixed interrupt.
1680  */
/*
 * Allocate interrupt handles of the requested type.  Returns B_TRUE with
 * i40e_intr_handles/i40e_intr_count filled in on success; on any failure
 * everything allocated here is torn down via i40e_rem_intrs() and
 * B_FALSE is returned so the caller can try a lesser interrupt type.
 */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
	int request, count, actual, rc, min;
	uint32_t reg;

	/*
	 * Decide how many vectors to request and the minimum we can
	 * accept.  See the block comment above this function for the
	 * reasoning behind these numbers.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
	case DDI_INTR_TYPE_MSI:
		request = 1;
		min = 1;
		break;
	case DDI_INTR_TYPE_MSIX:
		min = 2;
		if (!i40e->i40e_mr_enable) {
			request = 2;
			break;
		}
		/* Ask hardware how many MSI-X vectors this PF was given. */
		reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
		/*
		 * Should this read fail, we will drop back to using
		 * MSI or fixed interrupts.
		 */
		if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
			ddi_fm_service_impact(i40e->i40e_dip,
			    DDI_SERVICE_DEGRADED);
			return (B_FALSE);
		}
		request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
		request++;	/* the register value is n - 1 */
		break;
	default:
		panic("bad interrupt type passed to i40e_alloc_intr_handles: "
		    "%d", intr_type);
	}

	/* Make sure the system supports at least 'min' such interrupts. */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	/* ... and that enough of them are currently available. */
	rc = ddi_intr_get_navail(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	actual = 0;
	i40e->i40e_intr_count = 0;
	i40e->i40e_intr_count_max = 0;
	i40e->i40e_intr_count_min = 0;

	/* The handle array is sized for the full request, not 'actual'. */
	i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
	ASSERT(i40e->i40e_intr_size != 0);
	i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);

	/* Never ask for more than the system said was available. */
	rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
	    min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_count = actual;
	i40e->i40e_intr_count_max = request;
	i40e->i40e_intr_count_min = min;

	/* DDI may grant fewer than requested; enforce our floor. */
	if (actual < min) {
		i40e_log(i40e, "actual (%d) is less than minimum (%d).",
		    actual, min);
		goto alloc_handle_fail;
	}

	/*
	 * Record the priority and capabilities for our first vector.  Once
	 * we have it, that's our priority until detach time.  Even if we
	 * eventually participate in IRM, our priority shouldn't change.
	 */
	rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt priority failed with %d.", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt capabilities failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_type = intr_type;
	return (B_TRUE);

alloc_handle_fail:

	/* Frees any allocated handles and the handle array itself. */
	i40e_rem_intrs(i40e);
	return (B_FALSE);
}
1787 
/*
 * Allocate interrupts for the device, preferring MSI-X, then MSI, then
 * fixed, subject to the intr_force tunable.  As a side effect this sizes
 * the queue-pair and rx-group configuration: with MSI-X we scale them to
 * the granted vector count; otherwise we fall back to a single pair.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int intr_types, rc;
	uint_t max_trqpairs;

	/* Per-TC queue limits differ between the X722 and X710 families. */
	if (i40e_is_x722(i40e)) {
		max_trqpairs = I40E_722_MAX_TC_QUEUES;
	} else {
		max_trqpairs = I40E_710_MAX_TC_QUEUES;
	}

	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
	if (rc != DDI_SUCCESS) {
		i40e_error(i40e, "failed to get supported interrupt types: %d",
		    rc);
		return (B_FALSE);
	}

	i40e->i40e_intr_type = 0;

	/*
	 * We need to determine the number of queue pairs per traffic
	 * class. We only have one traffic class (TC0), so we'll base
	 * this off the number of interrupts provided. Furthermore,
	 * since we only use one traffic class, the number of queues
	 * per traffic class and per VSI are the same.
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSIX) &&
	    (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) {
		uint32_t n, qp_cap, num_trqpairs;

		/*
		 * While we want the number of queue pairs to match
		 * the number of interrupts, we must keep stay in
		 * bounds of the maximum number of queues per traffic
		 * class. We subtract one from i40e_intr_count to
		 * account for interrupt zero; which is currently
		 * restricted to admin queue commands and other
		 * interrupt causes.
		 */
		n = MIN(i40e->i40e_intr_count - 1, max_trqpairs);
		ASSERT3U(n, >, 0);

		/*
		 * Round up to the nearest power of two to ensure that
		 * the QBASE aligns with the TC size which must be
		 * programmed as a power of two. See the queue mapping
		 * description in section 7.4.9.5.5.1.
		 *
		 * If i40e_intr_count - 1 is not a power of two then
		 * some queue pairs on the same VSI will have to share
		 * an interrupt.
		 *
		 * We may want to revisit this logic in a future where
		 * we have more interrupts and more VSIs. Otherwise,
		 * each VSI will use as many interrupts as possible.
		 * Using more QPs per VSI means better RSS for each
		 * group, but at the same time may require more
		 * sharing of interrupts across VSIs. This may be a
		 * good candidate for a .conf tunable.
		 */
		n = 0x1 << ddi_fls(n);
		i40e->i40e_num_trqpairs_per_vsi = n;

		/*
		 * Make sure the number of tx/rx qpairs does not exceed
		 * the device's capabilities.
		 */
		ASSERT3U(i40e->i40e_num_rx_groups, >, 0);
		qp_cap = MIN(hw->func_caps.num_rx_qp, hw->func_caps.num_tx_qp);
		num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
		    i40e->i40e_num_rx_groups;
		if (num_trqpairs > qp_cap) {
			/* Shrink the group count rather than the QPs/VSI. */
			i40e->i40e_num_rx_groups = MAX(1, qp_cap /
			    i40e->i40e_num_trqpairs_per_vsi);
			num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
			    i40e->i40e_num_rx_groups;
			i40e_log(i40e, "Rx groups restricted to %u",
			    i40e->i40e_num_rx_groups);
		}
		ASSERT3U(num_trqpairs, >, 0);
		i40e->i40e_num_trqpairs = num_trqpairs;
		return (B_TRUE);
	}

	/*
	 * We only use multiple transmit/receive pairs when MSI-X interrupts are
	 * available due to the fact that the device basically only supports a
	 * single MSI interrupt.
	 */
	i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
	i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs;
	i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
			return (B_TRUE);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
			return (B_TRUE);
	}

	return (B_FALSE);
}
1898 
1899 /*
1900  * Map different interrupts to MSI-X vectors.
1901  */
1902 static boolean_t
1903 i40e_map_intrs_to_vectors(i40e_t *i40e)
1904 {
1905         if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
1906                 return (B_TRUE);
1907         }
1908 
1909         /*
1910          * Each queue pair is mapped to a single interrupt, so
1911          * transmit and receive interrupts for a given queue share the
1912          * same vector. Vector zero is reserved for the admin queue.
1913          */
1914         for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1915                 uint_t vector = i % (i40e->i40e_intr_count - 1);
1916 
1917                 i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1;
1918                 i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1;
1919         }
1920 
1921         return (B_TRUE);
1922 }
1923 
/*
 * Register an interrupt handler on each allocated vector.  MSI-X gets
 * one i40e_intr_msix handler per vector (with the vector number as its
 * second argument); MSI and fixed interrupts register a single handler.
 * On failure, any handlers registered so far are removed before
 * returning B_FALSE.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				/* Unwind the handlers added before this one. */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* Cast to pacify lint */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}
1973 
1974 /*
1975  * Perform periodic checks. Longer term, we should be thinking about additional
1976  * things here:
1977  *
1978  * o Stall Detection
1979  * o Temperature sensor detection
1980  * o Device resetting
1981  * o Statistics updating to avoid wraparound
1982  */
1983 static void
1984 i40e_timer(void *arg)
1985 {
1986         i40e_t *i40e = arg;
1987 
1988         mutex_enter(&i40e->i40e_general_lock);
1989         i40e_link_check(i40e);
1990         mutex_exit(&i40e->i40e_general_lock);
1991 }
1992 
1993 /*
1994  * Get the hardware state, and scribble away anything that needs scribbling.
1995  */
1996 static void
1997 i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
1998 {
1999         int rc;
2000 
2001         ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
2002 
2003         (void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
2004         i40e_link_check(i40e);
2005 
2006         /*
2007          * Try and determine our PHY. Note that we may have to retry to and
2008          * delay to detect fiber correctly.
2009          */
2010         rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
2011             NULL);
2012         if (rc == I40E_ERR_UNKNOWN_PHY) {
2013                 i40e_msec_delay(200);
2014                 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
2015                     &i40e->i40e_phy, NULL);
2016         }
2017 
2018         if (rc != I40E_SUCCESS) {
2019                 if (rc == I40E_ERR_UNKNOWN_PHY) {
2020                         i40e_error(i40e, "encountered unknown PHY type, "
2021                             "not attaching.");
2022                 } else {
2023                         i40e_error(i40e, "error getting physical capabilities: "
2024                             "%d, %d", rc, hw->aq.asq_last_status);
2025                 }
2026         }
2027 
2028         rc = i40e_update_link_info(hw);
2029         if (rc != I40E_SUCCESS) {
2030                 i40e_error(i40e, "failed to update link information: %d", rc);
2031         }
2032 
2033         /*
2034          * In general, we don't want to mask off (as in stop from being a cause)
2035          * any of the interrupts that the phy might be able to generate.
2036          */
2037         rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
2038         if (rc != I40E_SUCCESS) {
2039                 i40e_error(i40e, "failed to update phy link mask: %d", rc);
2040         }
2041 }
2042 
2043 /*
2044  * Go through and re-initialize any existing filters that we may have set up for
2045  * this device. Note that we would only expect them to exist if hardware had
2046  * already been initialized and we had just reset it. While we're not
2047  * implementing this yet, we're keeping this around for when we add reset
2048  * capabilities, so this isn't forgotten.
2049  */
2050 /* ARGSUSED */
2051 static void
2052 i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
2053 {
2054 }
2055 
2056 /*
2057  * Set the properties which have common values across all the VSIs.
2058  * Consult the "Add VSI" command section (7.4.9.5.5.1) for a
2059  * complete description of these properties.
2060  */
2061 static void
2062 i40e_set_shared_vsi_props(i40e_t *i40e,
2063     struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx)
2064 {
2065         uint_t tc_queues;
2066         uint16_t vsi_qp_base;
2067 
2068         /*
2069          * It's important that we use bitwise-OR here; callers to this
2070          * function might enable other sections before calling this
2071          * function.
2072          */
2073         info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
2074             I40E_AQ_VSI_PROP_VLAN_VALID);
2075 
2076         /*
2077          * Calculate the starting QP index for this VSI. This base is
2078          * relative to the PF queue space; so a value of 0 for PF#1
2079          * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1.
2080          */
2081         vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi;
2082         info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2083         info->queue_mapping[0] =
2084             LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) &
2085             I40E_AQ_VSI_QUEUE_MASK);
2086 
2087         /*
2088          * tc_queues determines the size of the traffic class, where
2089          * the size is 2^^tc_queues to a maximum of 64 for the X710
2090          * and 128 for the X722.
2091          *
2092          * Some examples:
2093          *      i40e_num_trqpairs_per_vsi == 1 =>  tc_queues = 0, 2^^0 = 1.
2094          *      i40e_num_trqpairs_per_vsi == 7 =>  tc_queues = 3, 2^^3 = 8.
2095          *      i40e_num_trqpairs_per_vsi == 8 =>  tc_queues = 3, 2^^3 = 8.
2096          *      i40e_num_trqpairs_per_vsi == 9 =>  tc_queues = 4, 2^^4 = 16.
2097          *      i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32.
2098          *      i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64.
2099          */
2100         tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1);
2101 
2102         /*
2103          * The TC queue mapping is in relation to the VSI queue space.
2104          * Since we are only using one traffic class (TC0) we always
2105          * start at queue offset 0.
2106          */
2107         info->tc_mapping[0] =
2108             LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
2109             I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2110             ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
2111             I40E_AQ_VSI_TC_QUE_NUMBER_MASK));
2112 
2113         /*
2114          * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode")
2115          *
2116          *      Allow tagged and untagged packets to be sent to this
2117          *      VSI from the host.
2118          *
2119          * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode")
2120          *
2121          *      Leave the tag on the frame and place no VLAN
2122          *      information in the descriptor. We want this mode
2123          *      because our MAC layer will take care of the VLAN tag,
2124          *      if there is one.
2125          */
2126         info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2127             I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2128 }
2129 
2130 /*
2131  * Delete the VSI at this index, if one exists. We assume there is no
2132  * action we can take if this command fails but to log the failure.
2133  */
2134 static void
2135 i40e_delete_vsi(i40e_t *i40e, uint_t idx)
2136 {
2137         i40e_hw_t       *hw = &i40e->i40e_hw_space;
2138         uint16_t        seid = i40e->i40e_vsis[idx].iv_seid;
2139 
2140         if (seid != 0) {
2141                 int rc;
2142 
2143                 rc = i40e_aq_delete_element(hw, seid, NULL);
2144 
2145                 if (rc != I40E_SUCCESS) {
2146                         i40e_error(i40e, "Failed to delete VSI %d: %d",
2147                             rc, hw->aq.asq_last_status);
2148                 }
2149 
2150                 i40e->i40e_vsis[idx].iv_seid = 0;
2151         }
2152 }
2153 
2154 /*
2155  * Add a new VSI.
2156  */
2157 static boolean_t
2158 i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx)
2159 {
2160         struct i40e_vsi_context ctx;
2161         i40e_rx_group_t         *rxg;
2162         int                     rc;
2163 
2164         /*
2165          * The default VSI is created by the controller. This function
2166          * creates new, non-defualt VSIs only.
2167          */
2168         ASSERT3U(idx, !=, 0);
2169 
2170         bzero(&ctx, sizeof (struct i40e_vsi_context));
2171         ctx.uplink_seid = i40e->i40e_veb_seid;
2172         ctx.pf_num = hw->pf_id;
2173         ctx.flags = I40E_AQ_VSI_TYPE_PF;
2174         ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
2175         i40e_set_shared_vsi_props(i40e, &ctx.info, idx);
2176 
2177         rc = i40e_aq_add_vsi(hw, &ctx, NULL);
2178         if (rc != I40E_SUCCESS) {
2179                 i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc,
2180                     hw->aq.asq_last_status);
2181                 return (B_FALSE);
2182         }
2183 
2184         rxg = &i40e->i40e_rx_groups[idx];
2185         rxg->irg_vsi_seid = ctx.seid;
2186         i40e->i40e_vsis[idx].iv_number = ctx.vsi_number;
2187         i40e->i40e_vsis[idx].iv_seid = ctx.seid;
2188         i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
2189 
2190         if (i40e_stat_vsi_init(i40e, idx) == B_FALSE)
2191                 return (B_FALSE);
2192 
2193         return (B_TRUE);
2194 }
2195 
2196 /*
2197  * Configure the hardware for the Default Virtual Station Interface (VSI).
2198  */
2199 static boolean_t
2200 i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw)
2201 {
2202         struct i40e_vsi_context ctx;
2203         i40e_rx_group_t *def_rxg;
2204         int err;
2205         struct i40e_aqc_remove_macvlan_element_data filt;
2206 
2207         bzero(&ctx, sizeof (struct i40e_vsi_context));
2208         ctx.seid = I40E_DEF_VSI_SEID(i40e);
2209         ctx.pf_num = hw->pf_id;
2210         err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
2211         if (err != I40E_SUCCESS) {
2212                 i40e_error(i40e, "get VSI params failed with %d", err);
2213                 return (B_FALSE);
2214         }
2215 
2216         ctx.info.valid_sections = 0;
2217         i40e->i40e_vsis[0].iv_number = ctx.vsi_number;
2218         i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
2219         if (i40e_stat_vsi_init(i40e, 0) == B_FALSE)
2220                 return (B_FALSE);
2221 
2222         i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX);
2223 
2224         err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
2225         if (err != I40E_SUCCESS) {
2226                 i40e_error(i40e, "Update VSI params failed with %d", err);
2227                 return (B_FALSE);
2228         }
2229 
2230         def_rxg = &i40e->i40e_rx_groups[0];
2231         def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e);
2232 
2233         /*
2234          * We have seen three different behaviors in regards to the
2235          * Default VSI and its implicit L2 MAC+VLAN filter.
2236          *
2237          * 1. It has an implicit filter for the factory MAC address
2238          *    and this filter counts against 'ifr_nmacfilt_used'.
2239          *
2240          * 2. It has an implicit filter for the factory MAC address
2241          *    and this filter DOES NOT count against 'ifr_nmacfilt_used'.
2242          *
2243          * 3. It DOES NOT have an implicit filter.
2244          *
2245          * All three of these cases are accounted for below. If we
2246          * fail to remove the L2 filter (ENOENT) then we assume there
2247          * wasn't one. Otherwise, if we successfully remove the
2248          * filter, we make sure to update the 'ifr_nmacfilt_used'
2249          * count accordingly.
2250          *
2251          * We remove this filter to prevent duplicate delivery of
2252          * packets destined for the primary MAC address as DLS will
2253          * create the same filter on a non-default VSI for the primary
2254          * MAC client.
2255          *
2256          * If you change the following code please test it across as
2257          * many X700 series controllers and firmware revisions as you
2258          * can.
2259          */
2260         bzero(&filt, sizeof (filt));
2261         bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
2262         filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2263         filt.vlan_tag = 0;
2264 
2265         ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
2266         i40e_log(i40e, "Num L2 filters: %u",
2267             i40e->i40e_resources.ifr_nmacfilt_used);
2268 
2269         err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
2270             NULL);
2271         if (err == I40E_SUCCESS) {
2272                 i40e_log(i40e,
2273                     "Removed L2 filter from Default VSI with SEID %u",
2274                     I40E_DEF_VSI_SEID(i40e));
2275         } else if (hw->aq.asq_last_status == ENOENT) {
2276                 i40e_log(i40e,
2277                     "No L2 filter for Default VSI with SEID %u",
2278                     I40E_DEF_VSI_SEID(i40e));
2279         } else {
2280                 i40e_error(i40e, "Failed to remove L2 filter from"
2281                     " Default VSI with SEID %u: %d (%d)",
2282                     I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
2283 
2284                 return (B_FALSE);
2285         }
2286 
2287 #if 0
2288         bzero(&filt, sizeof (filt));
2289         bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
2290         filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2291             I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2292         filt.vlan_tag = 0;
2293 
2294         ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
2295         i40e_log(i40e, "Num L2 filters (2nd try): %u",
2296             i40e->i40e_resources.ifr_nmacfilt_used);
2297 
2298         err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
2299             NULL);
2300         if (err == I40E_SUCCESS) {
2301                 i40e_log(i40e,
2302                     "(2nd try) Removed L2 filter from Default VSI with SEID %u",
2303                     I40E_DEF_VSI_SEID(i40e));
2304         } else if (hw->aq.asq_last_status == ENOENT) {
2305                 i40e_log(i40e,
2306                     "(2nd try) No L2 filter for Default VSI with SEID %u",
2307                     I40E_DEF_VSI_SEID(i40e));
2308         } else {
2309                 i40e_error(i40e, "(2nd try) Failed to remove L2 filter from"
2310                     " Default VSI with SEID %u: %d (%d)",
2311                     I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
2312 
2313                 return (B_FALSE);
2314         }
2315 #endif
2316         /*
2317          *  As mentioned above, the controller created an implicit L2
2318          *  filter for the primary MAC. We want to remove both the
2319          *  filter and decrement the filter count. However, not all
2320          *  controllers count this implicit filter against the total
2321          *  MAC filter count. So here we are making sure it is either
2322          *  one or zero. If it is one, then we know it is for the
2323          *  implicit filter and we should decrement since we just
2324          *  removed the filter above. If it is zero then we know the
2325          *  controller that does not count the implicit filter, and it
2326          *  was enough to just remove it; we leave the count alone.
2327          *  But if it is neither, then we have never seen a controller
2328          *  like this before and we should fail to attach.
2329          *
2330          *  It is unfortunate that this code must exist but the
2331          *  behavior of this implicit L2 filter and its corresponding
2332          *  count were dicovered through empirical testing. The
2333          *  programming manuals hint at this filter but do not
2334          *  explicitly call out the exact behavior.
2335          */
2336         if (i40e->i40e_resources.ifr_nmacfilt_used == 1) {
2337                 i40e->i40e_resources.ifr_nmacfilt_used--;
2338         } else {
2339                 if (i40e->i40e_resources.ifr_nmacfilt_used != 0) {
2340                         i40e_error(i40e, "Unexpected L2 filter count: %u"
2341                             " (expected 0)",
2342                             i40e->i40e_resources.ifr_nmacfilt_used);
2343                         return (B_FALSE);
2344                 }
2345         }
2346 
2347         return (B_TRUE);
2348 }
2349 
2350 static boolean_t
2351 i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw)
2352 {
2353         for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
2354                 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2355                 struct i40e_aqc_get_set_rss_key_data key;
2356                 const char *u8seed;
2357                 enum i40e_status_code status;
2358                 uint16_t vsi_number = i40e->i40e_vsis[i].iv_number;
2359 
2360                 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2361                 u8seed = (char *)seed;
2362 
2363                 CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) +
2364                     sizeof (key.extended_hash_key)));
2365 
2366                 bcopy(u8seed, key.standard_rss_key,
2367                     sizeof (key.standard_rss_key));
2368                 bcopy(&u8seed[sizeof (key.standard_rss_key)],
2369                     key.extended_hash_key, sizeof (key.extended_hash_key));
2370 
2371                 ASSERT3U(vsi_number, !=, 0);
2372                 status = i40e_aq_set_rss_key(hw, vsi_number, &key);
2373 
2374                 if (status != I40E_SUCCESS) {
2375                         i40e_error(i40e, "failed to set RSS key for VSI %u: %d",
2376                             vsi_number, status);
2377                         return (B_FALSE);
2378                 }
2379         }
2380 
2381         return (B_TRUE);
2382 }
2383 
2384 /*
2385  * Configure the RSS key. For the X710 controller family, this is set on a
2386  * per-PF basis via registers. For the X722, this is done on a per-VSI basis
2387  * through the admin queue.
2388  */
2389 static boolean_t
2390 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw)
2391 {
2392         if (i40e_is_x722(i40e)) {
2393                 if (!i40e_config_rss_key_x722(i40e, hw))
2394                         return (B_FALSE);
2395         } else {
2396                 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2397 
2398                 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2399                 for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
2400                         i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]);
2401         }
2402 
2403         return (B_TRUE);
2404 }
2405 
2406 /*
2407  * Populate the LUT. The size of each entry in the LUT depends on the controller
2408  * family, with the X722 using a known 7-bit width. On the X710 controller, this
2409  * is programmed through its control registers where as on the X722 this is
2410  * configured through the admin queue. Also of note, the X722 allows the LUT to
2411  * be set on a per-PF or VSI basis. At this time we use the PF setting. If we
2412  * decide to use the per-VSI LUT in the future, then we will need to modify the
2413  * i40e_add_vsi() function to set the RSS LUT bits in the queueing section.
2414  *
2415  * We populate the LUT in a round robin fashion with the rx queue indices from 0
2416  * to i40e_num_trqpairs_per_vsi - 1.
2417  */
2418 static boolean_t
2419 i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw)
2420 {
2421         uint32_t *hlut;
2422         uint8_t lut_mask;
2423         uint_t i;
2424         boolean_t ret = B_FALSE;
2425 
2426         /*
2427          * We always configure the PF with a table size of 512 bytes in
2428          * i40e_chip_start().
2429          */
2430         hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP);
2431         if (hlut == NULL) {
2432                 i40e_error(i40e, "i40e_config_rss() buffer allocation failed");
2433                 return (B_FALSE);
2434         }
2435 
2436         /*
2437          * The width of the X722 is apparently defined to be 7 bits, regardless
2438          * of the capability.
2439          */
2440         if (i40e_is_x722(i40e)) {
2441                 lut_mask = (1 << 7) - 1;
2442         } else {
2443                 lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;
2444         }
2445 
2446         for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) {
2447                 ((uint8_t *)hlut)[i] =
2448                     (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask;
2449         }
2450 
2451         if (i40e_is_x722(i40e)) {
2452                 enum i40e_status_code status;
2453 
2454                 status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut,
2455                     I40E_HLUT_TABLE_SIZE);
2456 
2457                 if (status != I40E_SUCCESS) {
2458                         i40e_error(i40e, "failed to set RSS LUT %d: %d",
2459                             status, hw->aq.asq_last_status);
2460                         goto out;
2461                 }
2462         } else {
2463                 for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) {
2464                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);
2465                 }
2466         }
2467         ret = B_TRUE;
2468 out:
2469         kmem_free(hlut, I40E_HLUT_TABLE_SIZE);
2470         return (ret);
2471 }
2472 
2473 /*
2474  * Set up RSS.
2475  *      1. Seed the hash key.
2476  *      2. Enable PCTYPEs for the hash filter.
2477  *      3. Populate the LUT.
2478  */
2479 static boolean_t
2480 i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
2481 {
2482         uint64_t hena;
2483 
2484         /*
2485          * 1. Seed the hash key
2486          */
2487         if (!i40e_config_rss_key(i40e, hw))
2488                 return (B_FALSE);
2489 
2490         /*
2491          * 2. Configure PCTYPES
2492          */
2493         hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2494             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2495             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2496             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2497             (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2498             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2499             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2500             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2501             (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2502             (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2503             (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2504 
2505         /*
2506          * Add additional types supported by the X722 controller.
2507          */
2508         if (i40e_is_x722(i40e)) {
2509                 hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
2510                     (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
2511                     (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
2512                     (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
2513                     (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
2514                     (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
2515         }
2516 
2517         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
2518         i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
2519 
2520         /*
2521          * 3. Populate LUT
2522          */
2523         return (i40e_config_rss_hlut(i40e, hw));
2524 }
2525 
2526 /*
2527  * Wrapper to kick the chipset on.
2528  */
2529 static boolean_t
2530 i40e_chip_start(i40e_t *i40e)
2531 {
2532         i40e_hw_t *hw = &i40e->i40e_hw_space;
2533         struct i40e_filter_control_settings filter;
2534         int rc;
2535         uint8_t err;
2536 
2537         if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
2538             (hw->aq.fw_maj_ver < 4)) {
2539                 i40e_msec_delay(75);
2540                 if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
2541                     I40E_SUCCESS) {
2542                         i40e_error(i40e, "failed to restart link: admin queue "
2543                             "error: %d", hw->aq.asq_last_status);
2544                         return (B_FALSE);
2545                 }
2546         }
2547 
2548         /* Determine hardware state */
2549         i40e_get_hw_state(i40e, hw);
2550 
2551         /* For now, we always disable Ethernet Flow Control. */
2552         hw->fc.requested_mode = I40E_FC_NONE;
2553         rc = i40e_set_fc(hw, &err, B_TRUE);
2554         if (rc != I40E_SUCCESS) {
2555                 i40e_error(i40e, "Setting flow control failed, returned %d"
2556                     " with error: 0x%x", rc, err);
2557                 return (B_FALSE);
2558         }
2559 
2560         /* Initialize mac addresses. */
2561         i40e_init_macaddrs(i40e, hw);
2562 
2563         /*
2564          * Set up the filter control. If the hash lut size is changed from
2565          * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and
2566          * i40e_config_rss_hlut() will need to be updated.
2567          */
2568         bzero(&filter, sizeof (filter));
2569         filter.enable_ethtype = TRUE;
2570         filter.enable_macvlan = TRUE;
2571         filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
2572 
2573         rc = i40e_set_filter_control(hw, &filter);
2574         if (rc != I40E_SUCCESS) {
2575                 i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
2576                 return (B_FALSE);
2577         }
2578 
2579         i40e_intr_chip_init(i40e);
2580 
2581         rc = i40e_get_mac_seid(i40e);
2582         if (rc == -1) {
2583                 i40e_error(i40e, "failed to obtain MAC Uplink SEID");
2584                 return (B_FALSE);
2585         }
2586         i40e->i40e_mac_seid = (uint16_t)rc;
2587 
2588         /*
2589          * Create a VEB in order to support multiple VSIs. Each VSI
2590          * functions as a MAC group. This call sets the PF's MAC as
2591          * the uplink port and the PF's default VSI as the default
2592          * downlink port.
2593          */
2594         rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e),
2595             0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL);
2596         if (rc != I40E_SUCCESS) {
2597                 i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc,
2598                     hw->aq.asq_last_status);
2599                 return (B_FALSE);
2600         }
2601 
2602         if (!i40e_config_def_vsi(i40e, hw))
2603                 return (B_FALSE);
2604 
2605         for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) {
2606                 if (!i40e_add_vsi(i40e, hw, i))
2607                         return (B_FALSE);
2608         }
2609 
2610         if (!i40e_config_rss(i40e, hw))
2611                 return (B_FALSE);
2612 
2613         i40e_flush(hw);
2614 
2615         return (B_TRUE);
2616 }
2617 
2618 /*
2619  * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
2620  */
2621 static void
2622 i40e_shutdown_rx_ring(i40e_trqpair_t *itrq)
2623 {
2624         i40e_t *i40e = itrq->itrq_i40e;
2625         i40e_hw_t *hw = &i40e->i40e_hw_space;
2626         uint32_t reg;
2627 
2628         /*
2629          * Step 1. 8.3.3.1.2 suggests the interrupt is removed from the
2630          * hardware interrupt linked list (see i40e_intr.c) but for
2631          * simplicity we keep this list immutable until the device
2632          * (distinct from an individual ring) is stopped.
2633          */
2634 
2635         /*
2636          * Step 2. Request the queue by clearing QENA_REQ. It may not be
2637          * set due to unwinding from failures and a partially enabled
2638          * ring set.
2639          */
2640         reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2641         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
2642                 return;
2643         VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
2644             I40E_QRX_ENA_QENA_REQ_MASK);
2645         reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2646         I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);
2647 
2648         /*
2649          * Step 3. Wait for the disable to take, by having QENA_STAT in the FPM
2650          * be cleared. Note that we could still receive data in the queue during
2651          * this time. We don't actually wait for this now and instead defer this
2652          * to i40e_shutdown_ring_wait(), after we've interleaved disabling the
2653          * TX queue as well.
2654          */
2655 }
2656 
2657 static void
2658 i40e_shutdown_tx_ring(i40e_trqpair_t *itrq)
2659 {
2660         i40e_t *i40e = itrq->itrq_i40e;
2661         i40e_hw_t *hw = &i40e->i40e_hw_space;
2662         uint32_t reg;
2663 
2664         /*
2665          * Step 2. Set the SET_QDIS flag for the queue.
2666          */
2667         i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_FALSE);
2668 
2669         /*
2670          * Step 3. Wait at least 400 usec.
2671          */
2672         drv_usecwait(500);
2673 
2674         /*
2675          * Step 4. Clear the QENA_REQ flag which tells hardware to
2676          * quiesce. If QENA_REQ is not already set then that means that
2677          * we likely already tried to disable this queue.
2678          */
2679         reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
2680         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) != 0) {
2681                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2682                 I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
2683         }
2684 
2685         /*
2686          * Step 5. Wait for the drain to finish. This will be done by the
2687          * hardware removing the QENA_STAT flag from the queue. Rather than
2688          * waiting here, we interleave it with the receive shutdown in
2689          * i40e_shutdown_ring_wait().
2690          */
2691 }
2692 
2693 /*
2694  * Wait for a ring to be shut down. e.g. Steps 2 and 5 from the above
2695  * functions.
2696  */
2697 static boolean_t
2698 i40e_shutdown_ring_wait(i40e_trqpair_t *itrq)
2699 {
2700         i40e_t *i40e = itrq->itrq_i40e;
2701         i40e_hw_t *hw = &i40e->i40e_hw_space;
2702         uint32_t reg;
2703         int try;
2704 
2705         for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2706                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2707                 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2708                         break;
2709                 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2710         }
2711 
2712         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
2713                 i40e_error(i40e, "timed out disabling rx queue %d",
2714                     itrq->itrq_index);
2715                 return (B_FALSE);
2716         }
2717 
2718         for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2719                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
2720                 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2721                         break;
2722                 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2723         }
2724 
2725         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
2726                 i40e_error(i40e, "timed out disabling tx queue %d",
2727                     itrq->itrq_index);
2728                 return (B_FALSE);
2729         }
2730 
2731         return (B_TRUE);
2732 }
2733 
2734 
2735 /*
2736  * Shutdown an individual ring and release any memory.
2737  */
2738 boolean_t
2739 i40e_shutdown_ring(i40e_trqpair_t *itrq)
2740 {
2741         boolean_t rv = B_TRUE;
2742 
2743         /*
2744          * Tell transmit path to quiesce, and wait until done.
2745          */
2746         if (i40e_ring_tx_quiesce(itrq)) {
2747                 /* Already quiesced. */
2748                 return (B_TRUE);
2749         }
2750 
2751         i40e_shutdown_rx_ring(itrq);
2752         i40e_shutdown_tx_ring(itrq);
2753         if (!i40e_shutdown_ring_wait(itrq))
2754                 rv = B_FALSE;
2755 
2756         /*
2757          * After the ring has stopped, we need to wait 50ms before
2758          * programming it again. Rather than wait here, we'll record
2759          * the time the ring was stopped. When the ring is started, we'll
2760          * check if enough time has expired and then wait if necessary.
2761          */
2762         itrq->irtq_time_stopped = gethrtime();
2763 
2764         /*
2765          * The rings have been stopped in the hardware, now wait for
2766          * a possibly active interrupt thread.
2767          */
2768         i40e_intr_quiesce(itrq);
2769 
2770         mutex_enter(&itrq->itrq_tx_lock);
2771         i40e_tx_cleanup_ring(itrq);
2772         mutex_exit(&itrq->itrq_tx_lock);
2773 
2774         i40e_free_ring_mem(itrq, B_FALSE);
2775 
2776         return (rv);
2777 }
2778 
2779 /*
2780  * Shutdown all the rings.
2781  * Called from i40e_stop(), and hopefully the mac layer has already
2782  * called ring stop for each ring, which would make this almost a no-op.
2783  */
2784 static boolean_t
2785 i40e_shutdown_rings(i40e_t *i40e)
2786 {
2787         boolean_t rv = B_TRUE;
2788         int i;
2789 
2790         for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2791                 if (!i40e_shutdown_ring(&i40e->i40e_trqpairs[i]))
2792                         rv = B_FALSE;
2793         }
2794 
2795         return (rv);
2796 }
2797 
2798 static void
2799 i40e_setup_rx_descs(i40e_trqpair_t *itrq)
2800 {
2801         int i;
2802         i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2803 
2804         for (i = 0; i < rxd->rxd_ring_size; i++) {
2805                 i40e_rx_control_block_t *rcb;
2806                 i40e_rx_desc_t *rdesc;
2807 
2808                 rcb = rxd->rxd_work_list[i];
2809                 rdesc = &rxd->rxd_desc_ring[i];
2810 
2811                 rdesc->read.pkt_addr =
2812                     CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
2813                 rdesc->read.hdr_addr = 0;
2814         }
2815 }
2816 
/*
 * Program the FPM/HMC receive queue context for this ring: clear any stale
 * context, then write one describing our descriptor ring and buffer
 * geometry. Returns B_TRUE on success.
 */
static boolean_t
i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_rxq rctx;
	int err;

	bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
	/* Base address is expressed in I40E_HMC_RX_CTX_UNIT units. */
	rctx.base = rxd->rxd_desc_area.dmab_dma_address /
	    I40E_HMC_RX_CTX_UNIT;
	rctx.qlen = rxd->rxd_ring_size;
	/* The data buffer size must be within the HMC's supported range. */
	VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
	VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
	rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	/* Header buffers are unused: size zero, no split, single buffer. */
	rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
	rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
	rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
	rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
	rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
	rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
	/* Largest frame the queue will accept. */
	rctx.rxmax = i40e->i40e_frame_max;
	/* TPH (TLP processing hints) is not used on any descriptor path. */
	rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;

	/*
	 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
	 */
	rctx.prefena = I40E_HMC_RX_PREFENA;

	err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
2871 
2872 /*
2873  * Take care of setting up the descriptor ring and actually programming the
2874  * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
2875  * rx rings.
2876  */
2877 static boolean_t
2878 i40e_setup_rx_ring(i40e_trqpair_t *itrq)
2879 {
2880         i40e_t *i40e = itrq->itrq_i40e;
2881         i40e_hw_t *hw = &i40e->i40e_hw_space;
2882         i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2883         uint32_t reg;
2884         int i;
2885 
2886         /*
2887          * Step 1. Program all receive ring descriptors.
2888          */
2889         i40e_setup_rx_descs(itrq);
2890 
2891         /*
2892          * Step 2. Program the queue's FPM/HMC context.
2893          */
2894         if (!i40e_setup_rx_hmc(itrq))
2895                 return (B_FALSE);
2896 
2897         /*
2898          * Step 3. Clear the queue's tail pointer and set it to the end
2899          * of the space.
2900          */
2901         I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index), 0);
2902         I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index),
2903             rxd->rxd_ring_size - 1);
2904 
2905         /*
2906          * Step 4. Enable the queue via the QENA_REQ.
2907          */
2908         reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2909         VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
2910             I40E_QRX_ENA_QENA_STAT_MASK));
2911         reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2912         I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);
2913 
2914         /*
2915          * Step 5. Verify that QENA_STAT has been set. It's promised
2916          * that this should occur within about 10 us, but like other
2917          * systems, we give the card a bit more time.
2918          */
2919         for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
2920                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2921 
2922                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2923                         break;
2924                 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2925         }
2926 
2927         if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2928                 i40e_error(i40e, "failed to enable rx queue %d, timed "
2929                     "out.", itrq->itrq_index);
2930                 return (B_FALSE);
2931         }
2932 
2933         return (B_TRUE);
2934 }
2935 
/*
 * Program the FPM/HMC transmit queue context for this ring: build a
 * context describing the descriptor ring and its head write-back area,
 * fetch the default VSI's queue-set handle for the rdylist, clear any
 * stale context, and write the new one. Returns B_TRUE on success.
 */
static boolean_t
i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_txq tctx;
	struct i40e_vsi_context context;
	int err;

	bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
	tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
	/* Base address is expressed in I40E_HMC_TX_CTX_UNIT units. */
	tctx.base = itrq->itrq_desc_area.dmab_dma_address /
	    I40E_HMC_TX_CTX_UNIT;
	tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
	tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
	tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
	tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
	/* Head write-back lets us track completions without register reads. */
	tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
	tctx.qlen = itrq->itrq_tx_ring_size;
	tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	/* The write-back area sits immediately after the descriptor ring. */
	tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
	    sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;

	/*
	 * This field isn't actually documented, like crc, but it suggests that
	 * it should be zeroed. We leave both of these here because of that for
	 * now. We should check with Intel on why these are here even.
	 */
	tctx.crc = 0;
	tctx.rdylist_act = 0;

	/*
	 * We're supposed to assign the rdylist field with the value of the
	 * traffic class index for the first device. We query the VSI parameters
	 * again to get what the handle is. Note that every queue is always
	 * assigned to traffic class zero, because we don't actually use them.
	 */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = I40E_DEF_VSI_SEID(i40e);
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}
	tctx.rdylist = LE_16(context.info.qs_handle[0]);

	err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
3002 
3003 /*
3004  * Take care of setting up the descriptor ring and actually programming the
3005  * device. See 8.4.3.1.1 for what we need to do here.
3006  */
3007 static boolean_t
3008 i40e_setup_tx_ring(i40e_trqpair_t *itrq)
3009 {
3010         i40e_t *i40e = itrq->itrq_i40e;
3011         i40e_hw_t *hw = &i40e->i40e_hw_space;
3012         uint32_t reg;
3013         int i;
3014 
3015         /*
3016          * Step 1. Clear the queue disable flag and verify that the
3017          * index is set correctly.
3018          */
3019         i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_TRUE);
3020 
3021         /*
3022          * Step 2. Prepare the queue's FPM/HMC context.
3023          */
3024         if (!i40e_setup_tx_hmc(itrq))
3025                 return (B_FALSE);
3026 
3027         /*
3028          * Step 3. Verify that it's clear that this PF owns this queue.
3029          */
3030         reg = I40E_QTX_CTL_PF_QUEUE;
3031         reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3032             I40E_QTX_CTL_PF_INDX_MASK;
3033         I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
3034         i40e_flush(hw);
3035 
3036         /*
3037          * Step 4. Set the QENA_REQ flag.
3038          */
3039         reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3040         VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
3041             I40E_QTX_ENA_QENA_STAT_MASK));
3042         reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3043         I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
3044 
3045         /*
3046          * Step 5. Verify that QENA_STAT has been set. It's promised
3047          * that this should occur within about 10 us, but like BSD,
3048          * we'll try for up to 100 ms for this queue.
3049          */
3050         for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
3051                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3052 
3053                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3054                         break;
3055                 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
3056         }
3057 
3058         if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3059                 i40e_error(i40e, "failed to enable tx queue %d, timed "
3060                     "out", itrq->itrq_index);
3061                 return (B_FALSE);
3062         }
3063 
3064         return (B_TRUE);
3065 }
3066 
3067 int
3068 i40e_setup_ring(i40e_trqpair_t *itrq)
3069 {
3070         i40e_t *i40e = itrq->itrq_i40e;
3071         hrtime_t now, gap;
3072 
3073         if (!i40e_alloc_ring_mem(itrq)) {
3074                 i40e_error(i40e, "Failed to allocate ring memory");
3075                 return (ENOMEM);
3076         }
3077 
3078         /*
3079          * 8.3.3.1.1 Receive Queue Enable Flow states software should
3080          * wait at least 50ms between ring disable and enable. See how
3081          * long we need to wait, and wait only if required.
3082          */
3083         now = gethrtime();
3084         gap = NSEC2MSEC(now - itrq->irtq_time_stopped);
3085         if (gap < I40E_RING_ENABLE_GAP && gap != 0)
3086                 delay(drv_usectohz(gap * 1000));
3087 
3088         mutex_enter(&itrq->itrq_intr_lock);
3089         if (!i40e_setup_rx_ring(itrq))
3090                 goto failed;
3091 
3092         if (!i40e_setup_tx_ring(itrq))
3093                 goto failed;
3094 
3095         if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
3096             DDI_FM_OK)
3097                 goto failed;
3098 
3099         itrq->itrq_intr_quiesce = B_FALSE;
3100         mutex_exit(&itrq->itrq_intr_lock);
3101 
3102         mutex_enter(&itrq->itrq_tx_lock);
3103         itrq->itrq_tx_quiesce = B_FALSE;
3104         mutex_exit(&itrq->itrq_tx_lock);
3105 
3106         return (0);
3107 
3108 failed:
3109         mutex_exit(&itrq->itrq_intr_lock);
3110         i40e_free_ring_mem(itrq, B_TRUE);
3111         ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3112 
3113         return (EIO);
3114 }
3115 
3116 void
3117 i40e_stop(i40e_t *i40e)
3118 {
3119         uint_t i;
3120         i40e_hw_t *hw = &i40e->i40e_hw_space;
3121 
3122         ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
3123 
3124         /*
3125          * Shutdown and drain the tx and rx pipeline. We do this using the
3126          * following steps.
3127          *
3128          * 1) Shutdown interrupts to all the queues (trying to keep the admin
3129          *    queue alive).
3130          *
3131          * 2) Remove all of the interrupt tx and rx causes by setting the
3132          *    interrupt linked lists to zero.
3133          *
3134          * 2) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
3135          *    wait for all the queues to be disabled, once we reach that point
3136          *    it should be safe to free associated data.
3137          *
3138          * 4) Wait 50ms after all that is done. This ensures that the rings are
3139          *    ready for programming again and we don't have to think about this
3140          *    in other parts of the driver.
3141          *
3142          * 5) Disable remaining chip interrupts, (admin queue, etc.)
3143          *
3144          * 6) Verify that FM is happy with all the register accesses we
3145          *    performed.
3146          */
3147         i40e_intr_io_disable_all(i40e);
3148         i40e_intr_io_clear_cause(i40e);
3149 
3150         if (!i40e_shutdown_rings(i40e))
3151                 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3152 
3153         /*
3154          * We don't delete the default VSI because it replaces the VEB
3155          * after VEB deletion (see the "Delete Element" section).
3156          * Furthermore, since the default VSI is provided by the
3157          * firmware, we never attempt to delete it.
3158          */
3159         for (i = 1; i < i40e->i40e_num_rx_groups; i++) {
3160                 i40e_delete_vsi(i40e, i);
3161         }
3162 
3163         if (i40e->i40e_veb_seid != 0) {
3164                 int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL);
3165 
3166                 if (rc != I40E_SUCCESS) {
3167                         i40e_error(i40e, "Failed to delete VEB %d: %d", rc,
3168                             hw->aq.asq_last_status);
3169                 }
3170 
3171                 i40e->i40e_veb_seid = 0;
3172         }
3173 
3174         i40e_intr_chip_fini(i40e);
3175 
3176         if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
3177             DDI_FM_OK) {
3178                 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3179         }
3180 
3181         for (i = 0; i < i40e->i40e_num_rx_groups; i++) {
3182                 i40e_stat_vsi_fini(i40e, i);
3183         }
3184 
3185         i40e->i40e_link_speed = 0;
3186         i40e->i40e_link_duplex = 0;
3187         i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
3188 }
3189 
/*
 * Bring the device into a running state. Callers must hold the general
 * lock. Starts the chip, enables broadcast reception on the default VSI,
 * programs the MAC configuration with the maximum frame size, and finally
 * enables the per-queue I/O interrupts.
 *
 * On any failure the device is stopped again via i40e_stop(), an FM
 * service impact is reported, and B_FALSE is returned.
 */
boolean_t
i40e_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t rc = B_TRUE;
	int err;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	if (!i40e_chip_start(i40e)) {
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Enable broadcast traffic; however, do not enable multicast traffic.
	 * That's handled exclusively through MAC's mc_multicst routines.
	 */
	err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE,
	    NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set default VSI: %d", err);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0,
	    B_FALSE, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set MAC config: %d", err);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Finally, make sure that we're happy from an FM perspective.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		rc = B_FALSE;
		goto done;
	}

	/* Clear state bits prior to final interrupt enabling. */
	atomic_and_32(&i40e->i40e_state,
	    ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));

	i40e_intr_io_enable_all(i40e);

done:
	if (rc == B_FALSE) {
		i40e_stop(i40e);
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	return (rc);
}
3248 
3249 /*
3250  * We may have loaned up descriptors to the stack. As such, if we still have
3251  * them outstanding, then we will not continue with detach.
3252  */
3253 static boolean_t
3254 i40e_drain_rx(i40e_t *i40e)
3255 {
3256         mutex_enter(&i40e->i40e_rx_pending_lock);
3257         while (i40e->i40e_rx_pending > 0) {
3258                 if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
3259                     &i40e->i40e_rx_pending_lock,
3260                     drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
3261                         mutex_exit(&i40e->i40e_rx_pending_lock);
3262                         return (B_FALSE);
3263                 }
3264         }
3265         mutex_exit(&i40e->i40e_rx_pending_lock);
3266 
3267         return (B_TRUE);
3268 }
3269 
3270 /*
3271  * DDI UFM Callbacks
3272  */
3273 static int
3274 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3275     ddi_ufm_image_t *img)
3276 {
3277         if (imgno != 0)
3278                 return (EINVAL);
3279 
3280         ddi_ufm_image_set_desc(img, "Firmware");
3281         ddi_ufm_image_set_nslots(img, 1);
3282 
3283         return (0);
3284 }
3285 
3286 static int
3287 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3288     uint_t slotno, ddi_ufm_slot_t *slot)
3289 {
3290         i40e_t *i40e = (i40e_t *)arg;
3291         char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL;
3292         nvlist_t *misc = NULL;
3293         uint_t flags = DDI_PROP_DONTPASS;
3294         int err;
3295 
3296         if (imgno != 0 || slotno != 0 ||
3297             ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3298             "firmware-version", &fw_ver) != DDI_PROP_SUCCESS ||
3299             ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3300             "firmware-build", &fw_bld) != DDI_PROP_SUCCESS ||
3301             ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3302             "api-version", &api_ver) != DDI_PROP_SUCCESS) {
3303                 err = EINVAL;
3304                 goto err;
3305         }
3306 
3307         ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
3308         ddi_ufm_slot_set_version(slot, fw_ver);
3309 
3310         (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3311         if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 ||
3312             (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) {
3313                 goto err;
3314         }
3315         ddi_ufm_slot_set_misc(slot, misc);
3316 
3317         ddi_prop_free(fw_ver);
3318         ddi_prop_free(fw_bld);
3319         ddi_prop_free(api_ver);
3320 
3321         return (0);
3322 err:
3323         nvlist_free(misc);
3324         if (fw_ver != NULL)
3325                 ddi_prop_free(fw_ver);
3326         if (fw_bld != NULL)
3327                 ddi_prop_free(fw_bld);
3328         if (api_ver != NULL)
3329                 ddi_prop_free(api_ver);
3330 
3331         return (err);
3332 }
3333 
/*
 * UFM capability callback: this driver only supports reporting firmware
 * information, not updating it.
 */
static int
i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;

	return (0);
}
3341 
/*
 * UFM entry points. The leading NULL member is the optional callback that
 * precedes fill_image in ddi_ufm_ops_t (presumably the image-count hook;
 * confirm against the ddi_ufm_ops_t definition) — i40e_ufm_fill_image
 * already rejects any image number other than zero.
 */
static ddi_ufm_ops_t i40e_ufm_ops = {
	NULL,
	i40e_ufm_fill_image,
	i40e_ufm_fill_slot,
	i40e_ufm_getcaps
};
3348 
/*
 * attach(9E) entry point. Initialization proceeds in ordered stages; each
 * completed stage is recorded in i40e_attach_progress so that on failure
 * i40e_unconfigure() can tear down exactly what was set up.
 */
static int
i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	i40e_t *i40e;
	struct i40e_osdep *osdep;
	i40e_hw_t *hw;
	int instance;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(devinfo);
	i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);

	i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
	i40e->i40e_instance = instance;
	i40e->i40e_dip = devinfo;

	/* Wire up the shared-code hw handle and the osdep back-pointer. */
	hw = &i40e->i40e_hw_space;
	osdep = &i40e->i40e_osdep_space;
	hw->back = osdep;
	osdep->ios_i40e = i40e;

	ddi_set_driver_private(devinfo, i40e);

	i40e_fm_init(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;

	if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
		i40e_error(i40e, "Failed to map PCI configurations.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;

	i40e_identify_hardware(i40e);

	if (!i40e_regs_map(i40e)) {
		i40e_error(i40e, "Failed to map device registers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;

	i40e_init_properties(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;

	if (!i40e_common_code_init(i40e, hw))
		goto attach_fail;
	i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;

	/*
	 * When we participate in IRM, we should make sure that we register
	 * ourselves with it before callbacks.
	 */
	if (!i40e_alloc_intrs(i40e, devinfo)) {
		i40e_error(i40e, "Failed to allocate interrupts.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;

	if (!i40e_alloc_trqpairs(i40e)) {
		i40e_error(i40e,
		    "Failed to allocate receive & transmit rings.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;

	if (!i40e_map_intrs_to_vectors(i40e)) {
		i40e_error(i40e, "Failed to map interrupts to vectors.");
		goto attach_fail;
	}

	if (!i40e_add_intr_handlers(i40e)) {
		i40e_error(i40e, "Failed to add the interrupt handlers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;

	if (!i40e_final_init(i40e)) {
		i40e_error(i40e, "Final initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_INIT;

	/* Verify config-space accesses so far before going further. */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	if (!i40e_stats_init(i40e)) {
		i40e_error(i40e, "Stats initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_STATS;

	if (!i40e_register_mac(i40e)) {
		i40e_error(i40e, "Failed to register to MAC/GLDv3");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_MAC;

	/* Periodic timer used for link checking; 0 means it failed to add. */
	i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
	    I40E_CYCLIC_PERIOD, DDI_IPL_0);
	if (i40e->i40e_periodic_id == 0) {
		i40e_error(i40e, "Failed to add the link-check timer");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;

	if (!i40e_enable_interrupts(i40e)) {
		i40e_error(i40e, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;

	/* Only PCI function 0 registers with the UFM subsystem. */
	if (i40e->i40e_hw_space.bus.func == 0) {
		if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION,
		    &i40e_ufm_ops, &i40e->i40e_ufmh, i40e) != 0) {
			i40e_error(i40e, "failed to initialize UFM subsystem");
			goto attach_fail;
		}
		ddi_ufm_update(i40e->i40e_ufmh);
		i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT;
	}

	atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);

	/* Publish the fully-initialized instance on the global list. */
	mutex_enter(&i40e_glock);
	list_insert_tail(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	return (DDI_SUCCESS);

attach_fail:
	i40e_unconfigure(devinfo, i40e);
	return (DDI_FAILURE);
}
3486 
3487 static int
3488 i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3489 {
3490         i40e_t *i40e;
3491 
3492         if (cmd != DDI_DETACH)
3493                 return (DDI_FAILURE);
3494 
3495         i40e = (i40e_t *)ddi_get_driver_private(devinfo);
3496         if (i40e == NULL) {
3497                 i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
3498                 return (DDI_FAILURE);
3499         }
3500 
3501         if (i40e_drain_rx(i40e) == B_FALSE) {
3502                 i40e_log(i40e, "timed out draining DMA resources, %d buffers "
3503                     "remain", i40e->i40e_rx_pending);
3504                 return (DDI_FAILURE);
3505         }
3506 
3507         mutex_enter(&i40e_glock);
3508         list_remove(&i40e_glist, i40e);
3509         mutex_exit(&i40e_glock);
3510 
3511         i40e_unconfigure(devinfo, i40e);
3512 
3513         return (DDI_SUCCESS);
3514 }
3515 
/*
 * Character/block entry points. All of these are stubs; this driver is
 * managed through the GLDv3/MAC framework (see i40e_register_mac() and
 * mac_init_ops() in _init()) rather than a traditional character interface.
 */
static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3536 
/*
 * Device operations. Only attach and detach are provided directly; fast
 * reboot quiesce is not supported.
 */
static struct dev_ops i40e_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i40e_attach,		/* devo_attach */
	i40e_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&i40e_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nulldev,		/* devo_power */
	ddi_quiesce_not_supported /* devo_quiesce */
};
3551 
/* Driver module linkage: identification string and device operations. */
static struct modldrv i40e_modldrv = {
	&mod_driverops,
	i40e_ident,
	&i40e_dev_ops
};
3557 
/* Module linkage: a single driver module, NULL-terminated. */
static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	&i40e_modldrv,
	NULL
};
3563 
3564 /*
3565  * Module Initialization Functions.
3566  */
3567 int
3568 _init(void)
3569 {
3570         int status;
3571 
3572         list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
3573         list_create(&i40e_dlist, sizeof (i40e_device_t),
3574             offsetof(i40e_device_t, id_link));
3575         mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
3576         mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);
3577 
3578         status = mod_install(&i40e_modlinkage);
3579         if (status != DDI_SUCCESS) {
3580                 mac_fini_ops(&i40e_dev_ops);
3581                 mutex_destroy(&i40e_glock);
3582                 list_destroy(&i40e_dlist);
3583                 list_destroy(&i40e_glist);
3584         }
3585 
3586         return (status);
3587 }
3588 
/* Report module information via the standard modinfo mechanism. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}
3594 
3595 int
3596 _fini(void)
3597 {
3598         int status;
3599 
3600         status = mod_remove(&i40e_modlinkage);
3601         if (status == DDI_SUCCESS) {
3602                 mac_fini_ops(&i40e_dev_ops);
3603                 mutex_destroy(&i40e_glock);
3604                 list_destroy(&i40e_dlist);
3605                 list_destroy(&i40e_glist);
3606         }
3607 
3608         return (status);
3609 }