Print this page
13230 i40e has duplicate traffic when used with bhyve/snoop running
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/i40e/i40e_main.c
+++ new/usr/src/uts/common/io/i40e/i40e_main.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
14 14 * Copyright 2019 Joyent, Inc.
15 15 * Copyright 2017 Tegile Systems, Inc. All rights reserved.
16 16 * Copyright 2020 RackTop Systems, Inc.
17 17 * Copyright 2020 Ryan Zezeski
18 18 * Copyright 2021 Oxide Computer Company
19 19 */
20 20
21 21 /*
22 22 * i40e - Intel 10/40 Gb Ethernet driver
23 23 *
24 24 * The i40e driver is the main software device driver for the Intel 40 Gb family
25 25 * of devices. Note that these devices come in many flavors with both 40 GbE
26 26 * ports and 10 GbE ports. This device is the successor to the 82599 family of
27 27 * devices (ixgbe).
28 28 *
29 29 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
30 30 * devices defined in the XL710 controller (previously known as Fortville) are a
31 31 * rather different beast and have a small switch embedded inside of them. In
32 32 * addition, the way that most of the programming is done has been overhauled.
33 33 * As opposed to just using PCIe memory mapped registers, it also has an
34 34 * administrative queue which is used to communicate with firmware running on
35 35 * the chip.
36 36 *
37 37 * Each physical function in the hardware shows up as a device that this driver
38 38 * will bind to. The hardware splits many resources evenly across all of the
39 39 * physical functions present on the device, while other resources are instead
40 40 * shared across the entire card and it's up to the device driver to
41 41 * intelligently partition them.
42 42 *
43 43 * ------------
44 44 * Organization
45 45 * ------------
46 46 *
47 47 * This driver is made up of several files which have their own theory
48 48 * statements spread across them. We'll touch on the high level purpose of each
49 49 * file here, and then we'll get into more discussion on how the device is
50 50 * generally modelled with respect to the interfaces in illumos.
51 51 *
52 52 * i40e_gld.c: This file contains all of the bindings to MAC and the networking
53 53 * stack.
54 54 *
55 55 * i40e_intr.c: This file contains all of the interrupt service routines and
56 56 * contains logic to enable and disable interrupts on the hardware.
57 57 * It also contains the logic to map hardware resources such as the
58 58 * rings to and from interrupts and controls their ability to fire.
59 59 *
60 60 * There is a big theory statement on interrupts present there.
61 61 *
62 62 * i40e_main.c: The file that you're currently in. It interfaces with the
63 63 * traditional OS DDI interfaces and is in charge of configuring
64 64 * the device.
65 65 *
66 66 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
67 67 * work with Intel's common code for the device.
68 68 *
69 69 * i40e_stats.c: This file contains the general work and logic around our
70 70 * kstats. A theory statement on their organization and use of the
71 71 * hardware exists there.
72 72 *
73 73 * i40e_sw.h: This header file contains all of the primary structure definitions
74 74 * and constants that are used across the entire driver.
75 75 *
76 76 * i40e_transceiver.c: This file contains all of the logic for sending and
77 77 * receiving data. It contains all of the ring and DMA
78 78 * allocation logic, as well as, the actual interfaces to
79 79 * send and receive data.
80 80 *
81 81 * A big theory statement on ring management, descriptors,
82 82 * and how it ties into the OS is present there.
83 83 *
84 84 * --------------
85 85 * General Design
86 86 * --------------
87 87 *
88 88 * Before we go too far into the general way we've laid out data structures and
89 89 * the like, it's worth taking some time to explain how the hardware is
90 90 * organized. This organization informs a lot of how we do things at this time
91 91 * in the driver.
92 92 *
93 93 * Each physical device consists of a number of one or more ports, which are
94 94 * considered physical functions in the PCI sense and thus each get enumerated
95 95 * by the system, resulting in an instance being created and attached to. While
96 96 * there are many resources that are unique to each physical function eg.
97 97 * instance of the device, there are many that are shared across all of them.
98 98 * Several resources have an amount reserved for each Virtual Station Interface
99 99 * (VSI) and then a static pool of resources, available for all functions on the
100 100 * card.
101 101 *
102 102 * The most important resource in hardware are its transmit and receive queue
103 103 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
104 104 * parlance. There are a set number of these on each device; however, they are
105 105 * statically partitioned among all of the different physical functions.
106 106 *
107 107 * 'Fortville' (the code name for this device family) is basically a switch. To
108 108 * map MAC addresses and other things to queues, we end up having to create
109 109 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
110 110 * traffic to a queue. A VSI owns a collection of queues and has a series of
111 111 * forwarding rules that point to it. One way to think of this is to treat it
112 112 * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
113 113 * classification resources, that is a VSI in i40e.
114 114 *
115 115 * The sets of VSIs is shared across the entire device, though there may be some
116 116 * amount that are reserved to each PF. Because the GLDv3 does not let us change
117 117 * the number of groups dynamically, we instead statically divide this amount
118 118 * evenly between all the functions that exist. In addition, we have the same
119 119 * problem with the mac address forwarding rules. There are a static number that
120 120 * exist shared across all the functions.
121 121 *
122 122 * To handle both of these resources, what we end up doing is going through and
123 123 * determining which functions belong to the same device. Nominally one might do
124 124 * this by having a nexus driver; however, a prime requirement for a nexus
125 125 * driver is identifying the various children and activating them. While it is
126 126 * possible to get this information from NVRAM, we would end up duplicating a
127 127 * lot of the PCI enumeration logic. Really, at the end of the day, the device
128 128 * doesn't give us the traditional identification properties we want from a
129 129 * nexus driver.
130 130 *
131 131 * Instead, we rely on some properties that are guaranteed to be unique. While
132 132 * it might be tempting to leverage the PBA or serial number of the device from
133 133 * NVRAM, there is nothing that says that two devices can't be mis-programmed to
134 134 * have the same values in NVRAM. Instead, we uniquely identify a group of
135 135 * functions based on their parent in the /devices tree, their PCI bus and PCI
136 136 * function identifiers. Using either on their own may not be sufficient.
137 137 *
138 138 * For each unique PCI device that we encounter, we'll create a i40e_device_t.
139 139 * From there, because we don't have a good way to tell the GLDv3 about sharing
140 140 * resources between everything, we'll end up just dividing the resources
141 141 * evenly between all of the functions. Longer term, if we don't have to declare
142 142 * to the GLDv3 that these resources are shared, then we'll maintain a pool and
143 143 * have each PF allocate from the pool in the device, thus if only two of four
144 144 * ports are being used, for example, then all of the resources can still be
145 145 * used.
146 146 *
147 147 * -------------------------------------------
148 148 * Transmit and Receive Queue Pair Allocations
149 149 * -------------------------------------------
150 150 *
151 151 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
152 152 * queue pairs, we have no way of modifying it, only observing it. From there,
153 153 * it's up to us to map these queues to VSIs and VFs. Since we don't support any
154 154 * VFs at this time, we only focus on assignments to VSIs.
155 155 *
156 156 * At the moment, we used a static mapping of transmit/receive queue pairs to a
157 157 * given VSI (eg. rings to a group). Though in the fullness of time, we want to
158 158 * make this something which is fully dynamic and take advantage of documented,
159 159 * but not yet available functionality for adding filters based on VXLAN and
160 160 * other encapsulation technologies.
161 161 *
162 162 * -------------------------------------
163 163 * Broadcast, Multicast, and Promiscuous
164 164 * -------------------------------------
165 165 *
166 166 * As part of the GLDv3, we need to make sure that we can handle receiving
167 167 * broadcast and multicast traffic. As well as enabling promiscuous mode when
168 168 * requested. GLDv3 requires that all broadcast and multicast traffic be
169 169 * retrieved by the default group, eg. the first one. This is the same thing as
170 170 * the default VSI.
171 171 *
172 172 * To receive broadcast traffic, we enable it through the admin queue, rather
173 173 * than use one of our filters for it. For multicast traffic, we reserve a
174 174 * certain number of the hash filters and assign them to a given PF. When we
175 175 * exceed those, we then switch to using promiscuous mode for multicast traffic.
176 176 *
177 177 * More specifically, once we exceed the number of filters (indicated because
178 178 * the i40e_t`i40e_resources.ifr_nmcastfilt ==
179 179 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
180 180 * promiscuous mode. If promiscuous mode is toggled then we keep track of the
181 181 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
182 182 * That will stay enabled until that count reaches zero indicating that we have
183 183 * only added multicast addresses that we have a corresponding entry for.
184 184 *
185 185 * Because MAC itself wants to toggle promiscuous mode, which includes both
186 186 * unicast and multicast traffic, we go through and keep track of that
187 187 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
188 188 * member.
189 189 *
190 190 * --------------
191 191 * VSI Management
192 192 * --------------
193 193 *
194 194 * The PFs share 384 VSIs. The firmware creates one VSI per PF by default.
195 195 * During chip start we retrieve the SEID of this VSI and assign it as the
196 196 * default VSI for our VEB (one VEB per PF). We then add additional VSIs to
197 197 * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups.
198 198 * We currently cap this number to I40E_GROUP_MAX to a) make sure all PFs can
199 199 * allocate the same number of VSIs, and b) to keep the interrupt multiplexing
200 200 * under control. In the future, when we improve the interrupt allocation, we
201 201 * may want to revisit this cap to make better use of the available VSIs. The
202 202 * VSI allocation and configuration can be found in i40e_chip_start().
203 203 *
204 204 * ----------------
205 205 * Structure Layout
206 206 * ----------------
207 207 *
208 208 * The following images relates the core data structures together. The primary
209 209 * structure in the system is the i40e_t. It itself contains multiple rings,
210 210 * i40e_trqpair_t's which contain the various transmit and receive data. The
211 211 * receive data is stored outside of the i40e_trqpair_t and instead in the
212 212 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps
213 213 * track of per-physical device state. Finally, for every active descriptor,
214 214 * there is a corresponding control block, which is where the
215 215 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from.
216 216 *
217 217 * +-----------------------+ +-----------------------+
218 218 * | Global i40e_t list | | Global Device list |
219 219 * | | +--| |
220 220 * | i40e_glist | | | i40e_dlist |
221 221 * +-----------------------+ | +-----------------------+
222 222 * | v
223 223 * | +------------------------+ +-----------------------+
224 224 * | | Device-wide Structure |----->| Device-wide Structure |--> ...
225 225 * | | i40e_device_t | | i40e_device_t |
226 226 * | | | +-----------------------+
227 227 * | | dev_info_t * ------+--> Parent in devices tree.
228 228 * | | uint_t ------+--> PCI bus number
229 229 * | | uint_t ------+--> PCI device number
230 230 * | | uint_t ------+--> Number of functions
231 231 * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources
232 232 * | | list_t ------+-------------+
233 233 * | +------------------------+ |
234 234 * | ^ |
235 235 * | +--------+ |
236 236 * | | v
237 237 * | +---------------------------+ | +-------------------+
238 238 * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ...
239 239 * | i40e_t | | | i40e_t |
240 240 * | **Primary Structure** | | +-------------------+
241 241 * | | |
242 242 * | i40e_device_t * --+-----+
243 243 * | i40e_state_t --+---> Device State
244 244 * | i40e_hw_t --+---> Intel common code structure
245 245 * | mac_handle_t --+---> GLDv3 handle to MAC
246 246 * | ddi_periodic_t --+---> Link activity timer
247 247 * | i40e_vsi_t * --+---> Array of VSIs
248 248 * | i40e_func_rsrc_t --+---> Available hardware resources
249 249 * | i40e_switch_rsrc_t * --+---> Switch resource snapshot
250 250 * | i40e_sdu --+---> Current MTU
251 251 * | i40e_frame_max --+---> Current HW frame size
252 252 * | i40e_uaddr_t * --+---> Array of assigned unicast MACs
253 253 * | i40e_maddr_t * --+---> Array of assigned multicast MACs
254 254 * | i40e_mcast_promisc_count --+---> Active multicast state
255 255 * | i40e_promisc_on --+---> Current promiscuous mode state
256 256 * | uint_t --+---> Number of transmit/receive pairs
257 257 * | i40e_rx_group_t * --+---> Array of Rx groups
258 258 * | kstat_t * --+---> PF kstats
259 259 * | i40e_pf_stats_t --+---> PF kstat backing data
260 260 * | i40e_trqpair_t * --+---------+
261 261 * +---------------------------+ |
262 262 * |
263 263 * v
264 264 * +-------------------------------+ +-----------------------------+
265 265 * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->...
266 266 * | i40e_trqpair_t | | i40e_trqpair_t |
267 267 * + Ring Data Structure | +-----------------------------+
268 268 * | |
269 269 * | mac_ring_handle_t +--> MAC RX ring handle
270 270 * | mac_ring_handle_t +--> MAC TX ring handle
271 271 * | i40e_rxq_stat_t --+--> RX Queue stats
272 272 * | i40e_txq_stat_t --+--> TX Queue stats
273 273 * | uint32_t (tx ring size) +--> TX Ring Size
274 274 * | uint32_t (tx free list size) +--> TX Free List Size
275 275 * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA
276 276 * | i40e_tx_desc_t * --------+--> TX descriptor ring
277 277 * | volatile uint32_t * +--> TX Write back head
278 278 * | uint32_t -------+--> TX ring head
279 279 * | uint32_t -------+--> TX ring tail
280 280 * | uint32_t -------+--> Num TX desc free
281 281 * | i40e_tx_control_block_t * --+--> TX control block array ---+
282 282 * | i40e_tx_control_block_t ** --+--> TCB work list ----+
283 283 * | i40e_tx_control_block_t ** --+--> TCB free list ---+
284 284 * | uint32_t -------+--> Free TCB count |
285 285 * | i40e_rx_data_t * -------+--+ v
286 286 * +-------------------------------+ | +---------------------------+
287 287 * | | Per-TX Frame Metadata |
288 288 * | | i40e_tx_control_block_t |
289 289 * +--------------------+ | |
290 290 * | mblk to transmit <--+--- mblk_t * |
291 291 * | type of transmit <--+--- i40e_tx_type_t |
292 292 * | TX DMA handle <--+--- ddi_dma_handle_t |
293 293 * v TX DMA buffer <--+--- i40e_dma_buffer_t |
294 294 * +------------------------------+ +---------------------------+
295 295 * | Core Receive Data |
296 296 * | i40e_rx_data_t |
297 297 * | |
298 298 * | i40e_dma_buffer_t --+--> RX descriptor DMA Data
299 299 * | i40e_rx_desc_t --+--> RX descriptor ring
300 300 * | uint32_t --+--> Next free desc.
301 301 * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+
302 302 * | i40e_rx_control_block_t ** --+--> RCB work list ---+
303 303 * | i40e_rx_control_block_t ** --+--> RCB free list ---+
304 304 * +------------------------------+ |
305 305 * ^ |
306 306 * | +---------------------------+ |
307 307 * | | Per-RX Frame Metadata |<---------------+
308 308 * | | i40e_rx_control_block_t |
309 309 * | | |
310 310 * | | mblk_t * ----+--> Received mblk_t data
311 311 * | | uint32_t ----+--> Reference count
312 312 * | | i40e_dma_buffer_t ----+--> Receive data DMA info
313 313 * | | frtn_t ----+--> mblk free function info
314 314 * +-----+-- i40e_rx_data_t * |
315 315 * +---------------------------+
316 316 *
317 317 * -------------
318 318 * Lock Ordering
319 319 * -------------
320 320 *
321 321 * In order to ensure that we don't deadlock, the following represents the
322 322 * lock order being used. When grabbing locks, follow the following order. Lower
323 323 * numbers are more important. Thus, the i40e_glock which is number 0, must be
324 324 * taken before any other locks in the driver. On the other hand, the
325 325 * i40e_t`i40e_stat_lock, has the highest number because it's the least
326 326 * important lock. Note, that just because one lock is higher than another does
327 327 * not mean that all intermediary locks are required.
328 328 *
329 329 * 0) i40e_glock
330 330 * 1) i40e_t`i40e_general_lock
331 331 *
332 332 * 2) i40e_trqpair_t`itrq_rx_lock
333 333 * 3) i40e_trqpair_t`itrq_tx_lock
334 334 * 4) i40e_trqpair_t`itrq_intr_lock
335 335 * 5) i40e_t`i40e_rx_pending_lock
336 336 * 6) i40e_trqpair_t`itrq_tcb_lock
337 337 *
338 338 * 7) i40e_t`i40e_stat_lock
339 339 *
340 340 * Rules and expectations:
341 341 *
342 342 * 1) A thread holding locks belong to one PF should not hold locks belonging to
343 343 * a second. If for some reason this becomes necessary, locks should be grabbed
344 344 * based on the list order in the i40e_device_t, which implies that the
345 345 * i40e_glock is held.
346 346 *
347 347 * 2) When grabbing locks between multiple transmit and receive queues, the
348 348 * locks for the lowest number transmit/receive queue should be grabbed first.
349 349 *
350 350 * 3) When grabbing both the transmit and receive lock for a given queue, always
351 351 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
352 352 *
353 353 * 4) The following pairs of locks are not expected to be held at the same time:
354 354 *
355 355 * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
356 356 * o i40e_trqpair_t`itrq_intr_lock is not expected to be held with any
357 357 * other lock except i40e_t`i40e_general_lock in mc_start(9E) and
358 358 * mc_stop(9e).
359 359 *
360 360 * -----------
361 361 * Future Work
362 362 * -----------
363 363 *
364 364 * At the moment the i40e_t driver is rather bare bones, allowing us to start
365 365 * getting data flowing and folks using it while we develop additional features.
366 366 * While bugs have been filed to cover this future work, the following gives an
367 367 * overview of expected work:
368 368 *
|
↓ open down ↓ |
368 lines elided |
↑ open up ↑ |
369 369 * o DMA binding and breaking up the locking in ring recycling.
370 370 * o Enhanced detection of device errors
371 371 * o Participation in IRM
372 372 * o FMA device reset
373 373 * o Stall detection, temperature error detection, etc.
374 374 * o More dynamic resource pools
375 375 */
376 376
377 377 #include "i40e_sw.h"
378 378
379 379 static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3";
380 380
381 381 /*
382 382 * The i40e_glock primarily protects the lists below and the i40e_device_t
383 383 * structures.
384 384 */
385 385 static kmutex_t i40e_glock;
386 386 static list_t i40e_glist;
387 387 static list_t i40e_dlist;
388 388
389 389 /*
390 390 * Access attributes for register mapping.
391 391 */
392 392 static ddi_device_acc_attr_t i40e_regs_acc_attr = {
393 393 DDI_DEVICE_ATTR_V1,
394 394 DDI_STRUCTURE_LE_ACC,
395 395 DDI_STRICTORDER_ACC,
396 396 DDI_FLAGERR_ACC
397 397 };
398 398
399 399 /*
400 400 * Logging function for this driver.
401 401 */
402 402 static void
403 403 i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
404 404 va_list ap)
405 405 {
406 406 char buf[1024];
407 407
408 408 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
409 409
410 410 if (i40e == NULL) {
411 411 cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
412 412 I40E_MODULE_NAME, buf);
413 413 } else {
414 414 dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
415 415 buf);
416 416 }
417 417 }
418 418
419 419 /*
420 420 * Because there's the stupid trailing-comma problem with the C preprocessor
421 421 * and variable arguments, I need to instantiate these. Pardon the redundant
422 422 * code.
423 423 */
424 424 /*PRINTFLIKE2*/
425 425 void
426 426 i40e_error(i40e_t *i40e, const char *fmt, ...)
427 427 {
428 428 va_list ap;
429 429
430 430 va_start(ap, fmt);
431 431 i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
432 432 va_end(ap);
433 433 }
434 434
435 435 /*PRINTFLIKE2*/
436 436 void
437 437 i40e_log(i40e_t *i40e, const char *fmt, ...)
438 438 {
439 439 va_list ap;
440 440
441 441 va_start(ap, fmt);
442 442 i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
443 443 va_end(ap);
444 444 }
445 445
446 446 /*PRINTFLIKE2*/
447 447 void
448 448 i40e_notice(i40e_t *i40e, const char *fmt, ...)
449 449 {
450 450 va_list ap;
451 451
452 452 va_start(ap, fmt);
453 453 i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
454 454 va_end(ap);
455 455 }
456 456
457 457 /*
458 458 * Various parts of the driver need to know if the controller is from the X722
459 459 * family, which has a few additional capabilities and different programming
460 460 * means. We don't consider virtual functions as part of this as they are quite
461 461 * different and will require substantially more work.
462 462 */
463 463 static boolean_t
464 464 i40e_is_x722(i40e_t *i40e)
465 465 {
466 466 return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722);
467 467 }
468 468
469 469 static void
470 470 i40e_device_rele(i40e_t *i40e)
471 471 {
472 472 i40e_device_t *idp = i40e->i40e_device;
473 473
474 474 if (idp == NULL)
475 475 return;
476 476
477 477 mutex_enter(&i40e_glock);
478 478 VERIFY(idp->id_nreg > 0);
479 479 list_remove(&idp->id_i40e_list, i40e);
480 480 idp->id_nreg--;
481 481 if (idp->id_nreg == 0) {
482 482 list_remove(&i40e_dlist, idp);
483 483 list_destroy(&idp->id_i40e_list);
484 484 kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
485 485 idp->id_rsrcs_alloc);
486 486 kmem_free(idp, sizeof (i40e_device_t));
487 487 }
488 488 i40e->i40e_device = NULL;
489 489 mutex_exit(&i40e_glock);
490 490 }
491 491
492 492 static i40e_device_t *
493 493 i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
494 494 {
495 495 i40e_device_t *idp;
496 496 mutex_enter(&i40e_glock);
497 497 for (idp = list_head(&i40e_dlist); idp != NULL;
498 498 idp = list_next(&i40e_dlist, idp)) {
499 499 if (idp->id_parent == parent && idp->id_pci_bus == bus &&
500 500 idp->id_pci_device == device) {
501 501 break;
502 502 }
503 503 }
504 504
505 505 if (idp != NULL) {
506 506 VERIFY(idp->id_nreg < idp->id_nfuncs);
507 507 idp->id_nreg++;
508 508 } else {
509 509 i40e_hw_t *hw = &i40e->i40e_hw_space;
510 510 ASSERT(hw->num_ports > 0);
511 511 ASSERT(hw->num_partitions > 0);
512 512
513 513 /*
514 514 * The Intel common code doesn't exactly keep the number of PCI
515 515 * functions. But it calculates it during discovery of
516 516 * partitions and ports. So what we do is undo the calculation
517 517 * that it does originally, as functions are evenly spread
518 518 * across ports in the rare case of partitions.
519 519 */
520 520 idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
521 521 idp->id_parent = parent;
522 522 idp->id_pci_bus = bus;
523 523 idp->id_pci_device = device;
524 524 idp->id_nfuncs = hw->num_ports * hw->num_partitions;
525 525 idp->id_nreg = 1;
526 526 idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
527 527 idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
528 528 idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
529 529 idp->id_rsrcs_alloc, KM_SLEEP);
530 530 bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
531 531 sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
532 532 list_create(&idp->id_i40e_list, sizeof (i40e_t),
533 533 offsetof(i40e_t, i40e_dlink));
534 534
535 535 list_insert_tail(&i40e_dlist, idp);
536 536 }
537 537
538 538 list_insert_tail(&idp->id_i40e_list, i40e);
539 539 mutex_exit(&i40e_glock);
540 540
541 541 return (idp);
542 542 }
543 543
544 544 static void
545 545 i40e_link_state_set(i40e_t *i40e, link_state_t state)
546 546 {
547 547 if (i40e->i40e_link_state == state)
548 548 return;
549 549
550 550 i40e->i40e_link_state = state;
551 551 mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
552 552 }
553 553
554 554 /*
555 555 * This is a basic link check routine. Mostly we're using this just to see
556 556 * if we can get any accurate information about the state of the link being
557 557 * up or down, as well as updating the link state, speed, etc. information.
558 558 */
559 559 void
560 560 i40e_link_check(i40e_t *i40e)
561 561 {
562 562 i40e_hw_t *hw = &i40e->i40e_hw_space;
563 563 boolean_t ls;
564 564 int ret;
565 565
566 566 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
567 567
568 568 hw->phy.get_link_info = B_TRUE;
569 569 if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
570 570 i40e->i40e_s_link_status_errs++;
571 571 i40e->i40e_s_link_status_lasterr = ret;
572 572 return;
573 573 }
574 574
575 575 /*
576 576 * Firmware abstracts all of the mac and phy information for us, so we
577 577 * can use i40e_get_link_status to determine the current state.
578 578 */
579 579 if (ls == B_TRUE) {
580 580 enum i40e_aq_link_speed speed;
581 581
582 582 speed = i40e_get_link_speed(hw);
583 583
584 584 /*
585 585 * Translate from an i40e value to a value in Mbits/s.
586 586 */
587 587 switch (speed) {
588 588 case I40E_LINK_SPEED_100MB:
589 589 i40e->i40e_link_speed = 100;
590 590 break;
591 591 case I40E_LINK_SPEED_1GB:
592 592 i40e->i40e_link_speed = 1000;
593 593 break;
594 594 case I40E_LINK_SPEED_2_5GB:
595 595 i40e->i40e_link_speed = 2500;
596 596 break;
597 597 case I40E_LINK_SPEED_5GB:
598 598 i40e->i40e_link_speed = 5000;
599 599 break;
600 600 case I40E_LINK_SPEED_10GB:
601 601 i40e->i40e_link_speed = 10000;
602 602 break;
603 603 case I40E_LINK_SPEED_20GB:
604 604 i40e->i40e_link_speed = 20000;
605 605 break;
606 606 case I40E_LINK_SPEED_40GB:
607 607 i40e->i40e_link_speed = 40000;
608 608 break;
609 609 case I40E_LINK_SPEED_25GB:
610 610 i40e->i40e_link_speed = 25000;
611 611 break;
612 612 default:
613 613 i40e->i40e_link_speed = 0;
614 614 break;
615 615 }
616 616
617 617 /*
618 618 * At this time, hardware does not support half-duplex
619 619 * operation, hence why we don't ask the hardware about our
620 620 * current speed.
621 621 */
622 622 i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
623 623 i40e_link_state_set(i40e, LINK_STATE_UP);
624 624 } else {
625 625 i40e->i40e_link_speed = 0;
626 626 i40e->i40e_link_duplex = 0;
627 627 i40e_link_state_set(i40e, LINK_STATE_DOWN);
628 628 }
629 629 }
630 630
631 631 static void
632 632 i40e_rem_intrs(i40e_t *i40e)
633 633 {
634 634 int i, rc;
635 635
636 636 for (i = 0; i < i40e->i40e_intr_count; i++) {
637 637 rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
638 638 if (rc != DDI_SUCCESS) {
639 639 i40e_log(i40e, "failed to free interrupt %d: %d",
640 640 i, rc);
641 641 }
642 642 }
643 643
644 644 kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
645 645 i40e->i40e_intr_handles = NULL;
646 646 }
647 647
648 648 static void
649 649 i40e_rem_intr_handlers(i40e_t *i40e)
650 650 {
651 651 int i, rc;
652 652
653 653 for (i = 0; i < i40e->i40e_intr_count; i++) {
654 654 rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
655 655 if (rc != DDI_SUCCESS) {
656 656 i40e_log(i40e, "failed to remove interrupt %d: %d",
657 657 i, rc);
658 658 }
659 659 }
660 660 }
661 661
662 662 /*
663 663 * illumos Fault Management Architecture (FMA) support.
664 664 */
665 665
666 666 int
667 667 i40e_check_acc_handle(ddi_acc_handle_t handle)
668 668 {
669 669 ddi_fm_error_t de;
670 670
671 671 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
672 672 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
673 673 return (de.fme_status);
674 674 }
675 675
676 676 int
677 677 i40e_check_dma_handle(ddi_dma_handle_t handle)
678 678 {
679 679 ddi_fm_error_t de;
680 680
681 681 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
682 682 return (de.fme_status);
683 683 }
684 684
685 685 /*
686 686 * Fault service error handling callback function.
687 687 */
688 688 /* ARGSUSED */
689 689 static int
690 690 i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
691 691 {
692 692 pci_ereport_post(dip, err, NULL);
693 693 return (err->fme_status);
694 694 }
695 695
696 696 static void
697 697 i40e_fm_init(i40e_t *i40e)
698 698 {
699 699 ddi_iblock_cookie_t iblk;
700 700
701 701 i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
702 702 i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
703 703 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
704 704 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
705 705
706 706 if (i40e->i40e_fm_capabilities < 0) {
707 707 i40e->i40e_fm_capabilities = 0;
708 708 } else if (i40e->i40e_fm_capabilities > 0xf) {
709 709 i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
710 710 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
711 711 DDI_FM_ERRCB_CAPABLE;
712 712 }
713 713
714 714 /*
715 715 * Only register with IO Fault Services if we have some capability
716 716 */
717 717 if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
718 718 i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
719 719 } else {
720 720 i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
721 721 }
722 722
723 723 if (i40e->i40e_fm_capabilities) {
724 724 ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);
725 725
726 726 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
727 727 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
728 728 pci_ereport_setup(i40e->i40e_dip);
729 729 }
730 730
731 731 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
732 732 ddi_fm_handler_register(i40e->i40e_dip,
733 733 i40e_fm_error_cb, (void*)i40e);
734 734 }
735 735 }
736 736
737 737 if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
738 738 i40e_init_dma_attrs(i40e, B_TRUE);
739 739 } else {
740 740 i40e_init_dma_attrs(i40e, B_FALSE);
741 741 }
742 742 }
743 743
744 744 static void
745 745 i40e_fm_fini(i40e_t *i40e)
746 746 {
747 747 if (i40e->i40e_fm_capabilities) {
748 748
749 749 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
750 750 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
751 751 pci_ereport_teardown(i40e->i40e_dip);
752 752
753 753 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
754 754 ddi_fm_handler_unregister(i40e->i40e_dip);
755 755
756 756 ddi_fm_fini(i40e->i40e_dip);
757 757 }
758 758 }
759 759
760 760 void
761 761 i40e_fm_ereport(i40e_t *i40e, char *detail)
762 762 {
763 763 uint64_t ena;
764 764 char buf[FM_MAX_CLASS];
765 765
766 766 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
767 767 ena = fm_ena_generate(0, FM_ENA_FMT1);
768 768 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
769 769 ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
770 770 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
771 771 }
772 772 }
773 773
774 774 /*
775 775 * Here we're trying to set the SEID of the default VSI. In general,
776 776 * when we come through and look at this shortly after attach, we
777 777 * expect there to only be a single element present, which is the
778 778 * default VSI. Importantly, each PF seems to not see any other
779 779 * devices, in part because of the simple switch mode that we're
780 780 * using. If for some reason, we see more artifacts, we'll need to
781 781 * revisit what we're doing here.
782 782 */
783 783 static boolean_t
784 784 i40e_set_def_vsi_seid(i40e_t *i40e)
785 785 {
786 786 i40e_hw_t *hw = &i40e->i40e_hw_space;
787 787 struct i40e_aqc_get_switch_config_resp *sw_config;
788 788 uint8_t aq_buf[I40E_AQ_LARGE_BUF];
789 789 uint16_t next = 0;
790 790 int rc;
791 791
792 792 /* LINTED: E_BAD_PTR_CAST_ALIGN */
793 793 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
794 794 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
795 795 NULL);
796 796 if (rc != I40E_SUCCESS) {
797 797 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
798 798 rc, hw->aq.asq_last_status);
799 799 return (B_FALSE);
800 800 }
801 801
802 802 if (LE_16(sw_config->header.num_reported) != 1) {
803 803 i40e_error(i40e, "encountered multiple (%d) switching units "
804 804 "during attach, not proceeding",
805 805 LE_16(sw_config->header.num_reported));
806 806 return (B_FALSE);
807 807 }
808 808
809 809 I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid;
810 810 return (B_TRUE);
811 811 }
812 812
813 813 /*
814 814 * Get the SEID of the uplink MAC.
815 815 */
816 816 static int
817 817 i40e_get_mac_seid(i40e_t *i40e)
818 818 {
819 819 i40e_hw_t *hw = &i40e->i40e_hw_space;
820 820 struct i40e_aqc_get_switch_config_resp *sw_config;
821 821 uint8_t aq_buf[I40E_AQ_LARGE_BUF];
822 822 uint16_t next = 0;
823 823 int rc;
824 824
825 825 /* LINTED: E_BAD_PTR_CAST_ALIGN */
826 826 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
827 827 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
828 828 NULL);
829 829 if (rc != I40E_SUCCESS) {
830 830 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
831 831 rc, hw->aq.asq_last_status);
832 832 return (-1);
833 833 }
834 834
835 835 return (LE_16(sw_config->element[0].uplink_seid));
836 836 }
837 837
838 838 /*
839 839 * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
840 840 * must also provide the memory for it; however, we don't need to keep it around
841 841 * to the call to the common code. It takes it and parses it into an internal
842 842 * structure.
843 843 */
844 844 static boolean_t
845 845 i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
846 846 {
847 847 struct i40e_aqc_list_capabilities_element_resp *buf;
848 848 int rc;
849 849 size_t len;
850 850 uint16_t needed;
851 851 int nelems = I40E_HW_CAP_DEFAULT;
852 852
853 853 len = nelems * sizeof (*buf);
854 854
855 855 for (;;) {
856 856 ASSERT(len > 0);
857 857 buf = kmem_alloc(len, KM_SLEEP);
858 858 rc = i40e_aq_discover_capabilities(hw, buf, len,
859 859 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
860 860 kmem_free(buf, len);
861 861
862 862 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
863 863 nelems == I40E_HW_CAP_DEFAULT) {
864 864 if (nelems == needed) {
865 865 i40e_error(i40e, "Capability discovery failed "
866 866 "due to byzantine common code");
867 867 return (B_FALSE);
868 868 }
869 869 len = needed;
870 870 continue;
871 871 } else if (rc != I40E_SUCCESS ||
872 872 hw->aq.asq_last_status != I40E_AQ_RC_OK) {
873 873 i40e_error(i40e, "Capability discovery failed: %d", rc);
874 874 return (B_FALSE);
875 875 }
876 876
877 877 break;
878 878 }
879 879
880 880 return (B_TRUE);
881 881 }
882 882
883 883 /*
884 884 * Obtain the switch's capabilities as seen by this PF and keep it around for
885 885 * our later use.
886 886 */
887 887 static boolean_t
888 888 i40e_get_switch_resources(i40e_t *i40e)
889 889 {
890 890 i40e_hw_t *hw = &i40e->i40e_hw_space;
891 891 uint8_t cnt = 2;
892 892 uint8_t act;
893 893 size_t size;
894 894 i40e_switch_rsrc_t *buf;
895 895
896 896 for (;;) {
897 897 enum i40e_status_code ret;
898 898 size = cnt * sizeof (i40e_switch_rsrc_t);
899 899 ASSERT(size > 0);
900 900 if (size > UINT16_MAX)
901 901 return (B_FALSE);
902 902 buf = kmem_alloc(size, KM_SLEEP);
903 903
904 904 ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
905 905 cnt, NULL);
906 906 if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
907 907 hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
908 908 kmem_free(buf, size);
909 909 cnt += I40E_SWITCH_CAP_DEFAULT;
910 910 continue;
911 911 } else if (ret != I40E_SUCCESS) {
912 912 kmem_free(buf, size);
913 913 i40e_error(i40e,
914 914 "failed to retrieve switch statistics: %d", ret);
915 915 return (B_FALSE);
916 916 }
917 917
918 918 break;
919 919 }
920 920
921 921 i40e->i40e_switch_rsrc_alloc = cnt;
922 922 i40e->i40e_switch_rsrc_actual = act;
923 923 i40e->i40e_switch_rsrcs = buf;
924 924
925 925 return (B_TRUE);
926 926 }
927 927
928 928 static void
929 929 i40e_cleanup_resources(i40e_t *i40e)
930 930 {
931 931 if (i40e->i40e_uaddrs != NULL) {
932 932 kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
933 933 i40e->i40e_resources.ifr_nmacfilt);
934 934 i40e->i40e_uaddrs = NULL;
935 935 }
936 936
937 937 if (i40e->i40e_maddrs != NULL) {
938 938 kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
939 939 i40e->i40e_resources.ifr_nmcastfilt);
940 940 i40e->i40e_maddrs = NULL;
941 941 }
942 942
943 943 if (i40e->i40e_switch_rsrcs != NULL) {
944 944 size_t sz = sizeof (i40e_switch_rsrc_t) *
945 945 i40e->i40e_switch_rsrc_alloc;
946 946 ASSERT(sz > 0);
947 947 kmem_free(i40e->i40e_switch_rsrcs, sz);
948 948 i40e->i40e_switch_rsrcs = NULL;
949 949 }
950 950
951 951 if (i40e->i40e_device != NULL)
952 952 i40e_device_rele(i40e);
953 953 }
954 954
955 955 static boolean_t
956 956 i40e_get_available_resources(i40e_t *i40e)
957 957 {
958 958 dev_info_t *parent;
959 959 uint16_t bus, device, func;
960 960 uint_t nregs;
961 961 int *regs, i;
962 962 i40e_device_t *idp;
963 963 i40e_hw_t *hw = &i40e->i40e_hw_space;
964 964
965 965 parent = ddi_get_parent(i40e->i40e_dip);
966 966
967 967 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
968 968 ®s, &nregs) != DDI_PROP_SUCCESS) {
969 969 return (B_FALSE);
970 970 }
971 971
972 972 if (nregs < 1) {
973 973 ddi_prop_free(regs);
974 974 return (B_FALSE);
975 975 }
976 976
977 977 bus = PCI_REG_BUS_G(regs[0]);
978 978 device = PCI_REG_DEV_G(regs[0]);
979 979 func = PCI_REG_FUNC_G(regs[0]);
980 980 ddi_prop_free(regs);
981 981
982 982 i40e->i40e_hw_space.bus.func = func;
983 983 i40e->i40e_hw_space.bus.device = device;
984 984
985 985 if (i40e_get_switch_resources(i40e) == B_FALSE) {
986 986 return (B_FALSE);
987 987 }
988 988
989 989 /*
990 990 * To calculate the total amount of a resource we have available, we
991 991 * need to add how many our i40e_t thinks it has guaranteed, if any, and
992 992 * then we need to go through and divide the number of available on the
993 993 * device, which was snapshotted before anyone should have allocated
994 994 * anything, and use that to derive how many are available from the
995 995 * pool. Longer term, we may want to turn this into something that's
996 996 * more of a pool-like resource that everything can share (though that
997 997 * may require some more assistance from MAC).
998 998 *
999 999 * Though for transmit and receive queue pairs, we just have to ask
1000 1000 * firmware instead.
1001 1001 */
1002 1002 idp = i40e_device_find(i40e, parent, bus, device);
1003 1003 i40e->i40e_device = idp;
1004 1004 i40e->i40e_resources.ifr_nvsis = 0;
1005 1005 i40e->i40e_resources.ifr_nvsis_used = 0;
1006 1006 i40e->i40e_resources.ifr_nmacfilt = 0;
1007 1007 i40e->i40e_resources.ifr_nmacfilt_used = 0;
1008 1008 i40e->i40e_resources.ifr_nmcastfilt = 0;
1009 1009 i40e->i40e_resources.ifr_nmcastfilt_used = 0;
1010 1010
1011 1011 for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
1012 1012 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1013 1013
1014 1014 switch (srp->resource_type) {
1015 1015 case I40E_AQ_RESOURCE_TYPE_VSI:
1016 1016 i40e->i40e_resources.ifr_nvsis +=
1017 1017 LE_16(srp->guaranteed);
1018 1018 i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
1019 1019 break;
1020 1020 case I40E_AQ_RESOURCE_TYPE_MACADDR:
1021 1021 i40e->i40e_resources.ifr_nmacfilt +=
1022 1022 LE_16(srp->guaranteed);
1023 1023 i40e->i40e_resources.ifr_nmacfilt_used =
1024 1024 LE_16(srp->used);
1025 1025 break;
1026 1026 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1027 1027 i40e->i40e_resources.ifr_nmcastfilt +=
1028 1028 LE_16(srp->guaranteed);
1029 1029 i40e->i40e_resources.ifr_nmcastfilt_used =
1030 1030 LE_16(srp->used);
1031 1031 break;
1032 1032 default:
1033 1033 break;
1034 1034 }
1035 1035 }
1036 1036
1037 1037 for (i = 0; i < idp->id_rsrcs_act; i++) {
1038 1038 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1039 1039 switch (srp->resource_type) {
1040 1040 case I40E_AQ_RESOURCE_TYPE_VSI:
1041 1041 i40e->i40e_resources.ifr_nvsis +=
1042 1042 LE_16(srp->total_unalloced) / idp->id_nfuncs;
1043 1043 break;
1044 1044 case I40E_AQ_RESOURCE_TYPE_MACADDR:
1045 1045 i40e->i40e_resources.ifr_nmacfilt +=
1046 1046 LE_16(srp->total_unalloced) / idp->id_nfuncs;
1047 1047 break;
1048 1048 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1049 1049 i40e->i40e_resources.ifr_nmcastfilt +=
1050 1050 LE_16(srp->total_unalloced) / idp->id_nfuncs;
1051 1051 default:
1052 1052 break;
1053 1053 }
1054 1054 }
1055 1055
1056 1056 i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
1057 1057 i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;
1058 1058
1059 1059 i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
1060 1060 i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
1061 1061 i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
1062 1062 i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);
1063 1063
1064 1064 /*
1065 1065 * Initialize these as multicast addresses to indicate it's invalid for
1066 1066 * sanity purposes. Think of it like 0xdeadbeef.
1067 1067 */
1068 1068 for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
1069 1069 i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;
1070 1070
1071 1071 return (B_TRUE);
1072 1072 }
1073 1073
1074 1074 static boolean_t
1075 1075 i40e_enable_interrupts(i40e_t *i40e)
1076 1076 {
1077 1077 int i, rc;
1078 1078
1079 1079 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1080 1080 rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
1081 1081 i40e->i40e_intr_count);
1082 1082 if (rc != DDI_SUCCESS) {
1083 1083 i40e_error(i40e, "Interrupt block-enable failed: %d",
1084 1084 rc);
1085 1085 return (B_FALSE);
1086 1086 }
1087 1087 } else {
1088 1088 for (i = 0; i < i40e->i40e_intr_count; i++) {
1089 1089 rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
1090 1090 if (rc != DDI_SUCCESS) {
1091 1091 i40e_error(i40e,
1092 1092 "Failed to enable interrupt %d: %d", i, rc);
1093 1093 while (--i >= 0) {
1094 1094 (void) ddi_intr_disable(
1095 1095 i40e->i40e_intr_handles[i]);
1096 1096 }
1097 1097 return (B_FALSE);
1098 1098 }
1099 1099 }
1100 1100 }
1101 1101
1102 1102 return (B_TRUE);
1103 1103 }
1104 1104
1105 1105 static boolean_t
1106 1106 i40e_disable_interrupts(i40e_t *i40e)
1107 1107 {
1108 1108 int i, rc;
1109 1109
1110 1110 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111 1111 rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
1112 1112 i40e->i40e_intr_count);
1113 1113 if (rc != DDI_SUCCESS) {
1114 1114 i40e_error(i40e,
1115 1115 "Interrupt block-disabled failed: %d", rc);
1116 1116 return (B_FALSE);
1117 1117 }
1118 1118 } else {
1119 1119 for (i = 0; i < i40e->i40e_intr_count; i++) {
1120 1120 rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
1121 1121 if (rc != DDI_SUCCESS) {
1122 1122 i40e_error(i40e,
1123 1123 "Failed to disable interrupt %d: %d",
1124 1124 i, rc);
1125 1125 return (B_FALSE);
1126 1126 }
1127 1127 }
1128 1128 }
1129 1129
1130 1130 return (B_TRUE);
1131 1131 }
1132 1132
1133 1133 /*
1134 1134 * Free receive & transmit rings.
1135 1135 */
1136 1136 static void
1137 1137 i40e_free_trqpairs(i40e_t *i40e)
1138 1138 {
1139 1139 i40e_trqpair_t *itrq;
1140 1140
1141 1141 if (i40e->i40e_rx_groups != NULL) {
1142 1142 kmem_free(i40e->i40e_rx_groups,
1143 1143 sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups);
1144 1144 i40e->i40e_rx_groups = NULL;
1145 1145 }
1146 1146
1147 1147 if (i40e->i40e_trqpairs != NULL) {
1148 1148 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1149 1149 itrq = &i40e->i40e_trqpairs[i];
1150 1150 mutex_destroy(&itrq->itrq_intr_lock);
1151 1151 mutex_destroy(&itrq->itrq_rx_lock);
1152 1152 mutex_destroy(&itrq->itrq_tx_lock);
1153 1153 mutex_destroy(&itrq->itrq_tcb_lock);
1154 1154 cv_destroy(&itrq->itrq_intr_cv);
1155 1155 cv_destroy(&itrq->itrq_tx_cv);
1156 1156
1157 1157 i40e_stats_trqpair_fini(itrq);
1158 1158 }
1159 1159
1160 1160 kmem_free(i40e->i40e_trqpairs,
1161 1161 sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
1162 1162 i40e->i40e_trqpairs = NULL;
1163 1163 }
1164 1164
1165 1165 cv_destroy(&i40e->i40e_rx_pending_cv);
1166 1166 mutex_destroy(&i40e->i40e_rx_pending_lock);
1167 1167 mutex_destroy(&i40e->i40e_general_lock);
1168 1168 }
1169 1169
1170 1170 /*
1171 1171 * Allocate transmit and receive rings, as well as other data structures that we
1172 1172 * need.
1173 1173 */
1174 1174 static boolean_t
1175 1175 i40e_alloc_trqpairs(i40e_t *i40e)
1176 1176 {
1177 1177 void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);
1178 1178
1179 1179 /*
1180 1180 * Now that we have the priority for the interrupts, initialize
1181 1181 * all relevant locks.
1182 1182 */
1183 1183 mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
1184 1184 mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
1185 1185 cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);
1186 1186
1187 1187 i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
1188 1188 i40e->i40e_num_trqpairs, KM_SLEEP);
1189 1189 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1190 1190 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
1191 1191
1192 1192 itrq->itrq_i40e = i40e;
1193 1193 mutex_init(&itrq->itrq_intr_lock, NULL, MUTEX_DRIVER, mutexpri);
1194 1194 mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
1195 1195 mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
1196 1196 mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
1197 1197 cv_init(&itrq->itrq_intr_cv, NULL, CV_DRIVER, NULL);
1198 1198 cv_init(&itrq->itrq_tx_cv, NULL, CV_DRIVER, NULL);
1199 1199 itrq->itrq_index = i;
1200 1200 itrq->itrq_intr_quiesce = B_TRUE;
1201 1201 itrq->itrq_tx_quiesce = B_TRUE;
1202 1202 }
1203 1203
1204 1204 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1205 1205 /*
1206 1206 * Keeping this in a separate iteration makes the
1207 1207 * clean up path safe.
1208 1208 */
1209 1209 if (!i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i])) {
1210 1210 i40e_free_trqpairs(i40e);
1211 1211 return (B_FALSE);
1212 1212 }
1213 1213 }
1214 1214
1215 1215 i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) *
1216 1216 i40e->i40e_num_rx_groups, KM_SLEEP);
1217 1217
1218 1218 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
1219 1219 i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i];
1220 1220
1221 1221 rxg->irg_index = i;
1222 1222 rxg->irg_i40e = i40e;
1223 1223 }
1224 1224
1225 1225 return (B_TRUE);
1226 1226 }
1227 1227
1228 1228
1229 1229
1230 1230 /*
1231 1231 * Unless a .conf file already overrode i40e_t structure values, they will
1232 1232 * be 0, and need to be set in conjunction with the now-available HW report.
1233 1233 */
1234 1234 /* ARGSUSED */
1235 1235 static void
1236 1236 i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
1237 1237 {
1238 1238 if (i40e->i40e_num_trqpairs_per_vsi == 0) {
1239 1239 if (i40e_is_x722(i40e)) {
1240 1240 i40e->i40e_num_trqpairs_per_vsi =
1241 1241 I40E_722_MAX_TC_QUEUES;
1242 1242 } else {
1243 1243 i40e->i40e_num_trqpairs_per_vsi =
1244 1244 I40E_710_MAX_TC_QUEUES;
1245 1245 }
1246 1246 }
1247 1247
1248 1248 if (i40e->i40e_num_rx_groups == 0) {
1249 1249 i40e->i40e_num_rx_groups = I40E_DEF_NUM_RX_GROUPS;
1250 1250 }
1251 1251 }
1252 1252
1253 1253 /*
1254 1254 * Free any resources required by, or setup by, the Intel common code.
1255 1255 */
1256 1256 static void
1257 1257 i40e_common_code_fini(i40e_t *i40e)
1258 1258 {
1259 1259 i40e_hw_t *hw = &i40e->i40e_hw_space;
1260 1260 int rc;
1261 1261
1262 1262 rc = i40e_shutdown_lan_hmc(hw);
1263 1263 if (rc != I40E_SUCCESS)
1264 1264 i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);
1265 1265
1266 1266 rc = i40e_shutdown_adminq(hw);
1267 1267 if (rc != I40E_SUCCESS)
1268 1268 i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
1269 1269 }
1270 1270
1271 1271 /*
1272 1272 * Initialize and call Intel common-code routines, includes some setup
1273 1273 * the common code expects from the driver. Also prints on failure, so
1274 1274 * the caller doesn't have to.
1275 1275 */
1276 1276 static boolean_t
1277 1277 i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
1278 1278 {
1279 1279 int rc;
1280 1280
1281 1281 i40e_clear_hw(hw);
1282 1282 rc = i40e_pf_reset(hw);
1283 1283 if (rc != 0) {
1284 1284 i40e_error(i40e, "failed to reset hardware: %d", rc);
1285 1285 i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
1286 1286 return (B_FALSE);
1287 1287 }
1288 1288
1289 1289 rc = i40e_init_shared_code(hw);
1290 1290 if (rc != 0) {
1291 1291 i40e_error(i40e, "failed to initialize i40e core: %d", rc);
1292 1292 return (B_FALSE);
1293 1293 }
1294 1294
1295 1295 hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
1296 1296 hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
1297 1297 hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
1298 1298 hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;
1299 1299
1300 1300 rc = i40e_init_adminq(hw);
1301 1301 if (rc != 0) {
1302 1302 i40e_error(i40e, "failed to initialize firmware admin queue: "
1303 1303 "%d, potential firmware version mismatch", rc);
1304 1304 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
1305 1305 return (B_FALSE);
1306 1306 }
1307 1307
1308 1308 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1309 1309 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
1310 1310 i40e_log(i40e, "The driver for the device detected a newer "
1311 1311 "version of the NVM image (%d.%d) than expected (%d.%d).\n"
1312 1312 "Please install the most recent version of the network "
1313 1313 "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
1314 1314 I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
1315 1315 } else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
1316 1316 hw->aq.api_min_ver < (I40E_FW_MINOR_VERSION(hw) - 1)) {
1317 1317 i40e_log(i40e, "The driver for the device detected an older"
1318 1318 " version of the NVM image (%d.%d) than expected (%d.%d)."
1319 1319 "\nPlease update the NVM image.\n",
1320 1320 hw->aq.api_maj_ver, hw->aq.api_min_ver,
1321 1321 I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw) - 1);
1322 1322 }
1323 1323
1324 1324 i40e_clear_pxe_mode(hw);
1325 1325
1326 1326 /*
1327 1327 * We need to call this so that the common code can discover
1328 1328 * capabilities of the hardware, which it uses throughout the rest.
1329 1329 */
1330 1330 if (!i40e_get_hw_capabilities(i40e, hw)) {
1331 1331 i40e_error(i40e, "failed to obtain hardware capabilities");
1332 1332 return (B_FALSE);
1333 1333 }
1334 1334
1335 1335 if (i40e_get_available_resources(i40e) == B_FALSE) {
1336 1336 i40e_error(i40e, "failed to obtain hardware resources");
1337 1337 return (B_FALSE);
1338 1338 }
1339 1339
1340 1340 i40e_hw_to_instance(i40e, hw);
1341 1341
1342 1342 rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1343 1343 hw->func_caps.num_rx_qp, 0, 0);
1344 1344 if (rc != 0) {
1345 1345 i40e_error(i40e, "failed to initialize hardware memory cache: "
1346 1346 "%d", rc);
1347 1347 return (B_FALSE);
1348 1348 }
1349 1349
1350 1350 rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1351 1351 if (rc != 0) {
1352 1352 i40e_error(i40e, "failed to configure hardware memory cache: "
1353 1353 "%d", rc);
1354 1354 return (B_FALSE);
1355 1355 }
1356 1356
1357 1357 (void) i40e_aq_stop_lldp(hw, TRUE, FALSE, NULL);
1358 1358
1359 1359 rc = i40e_get_mac_addr(hw, hw->mac.addr);
1360 1360 if (rc != I40E_SUCCESS) {
1361 1361 i40e_error(i40e, "failed to retrieve hardware mac address: %d",
1362 1362 rc);
1363 1363 return (B_FALSE);
1364 1364 }
1365 1365
1366 1366 rc = i40e_validate_mac_addr(hw->mac.addr);
1367 1367 if (rc != 0) {
1368 1368 i40e_error(i40e, "failed to validate internal mac address: "
1369 1369 "%d", rc);
1370 1370 return (B_FALSE);
1371 1371 }
1372 1372 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
1373 1373 if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
1374 1374 I40E_SUCCESS) {
1375 1375 i40e_error(i40e, "failed to retrieve port mac address: %d",
1376 1376 rc);
1377 1377 return (B_FALSE);
1378 1378 }
1379 1379
1380 1380 /*
1381 1381 * We need to obtain the Default Virtual Station SEID (VSI)
1382 1382 * before we can perform other operations on the device.
1383 1383 */
1384 1384 if (!i40e_set_def_vsi_seid(i40e)) {
1385 1385 i40e_error(i40e, "failed to obtain Default VSI SEID");
1386 1386 return (B_FALSE);
1387 1387 }
1388 1388
1389 1389 return (B_TRUE);
1390 1390 }
1391 1391
/*
 * Tear down everything that attach set up, keyed off the
 * i40e_attach_progress bits so this can unwind a partial attach as
 * well as a full detach. Steps run in roughly the reverse order of
 * attach, and the i40e_t itself is freed at the end.
 */
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	/* Stop the periodic link timer before its state goes away. */
	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT)
		ddi_ufm_fini(i40e->i40e_ufmh);

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	/* Remove handlers before freeing the rings they reference. */
	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	/* Always safe: frees only what was actually allocated. */
	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}
1458 1458
/*
 * Final attach-time bookkeeping: publish the PBA and firmware/API
 * version strings as device properties, record the PCI bus info in the
 * common-code structure, and verify register accesses are still
 * healthy per FMA. Returns B_FALSE if bus info cannot be set or the
 * register handle has faulted.
 */
static boolean_t
i40e_final_init(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = OS_DEP(hw);
	uint8_t pbanum[I40E_PBANUM_STRLEN];
	enum i40e_status_code irc;
	char buf[I40E_DDI_PROP_LEN];

	pbanum[0] = '\0';
	irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (irc != I40E_SUCCESS) {
		/* Non-fatal: we simply skip publishing the property. */
		i40e_log(i40e, "failed to read PBA string: %d", irc);
	} else {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
		    "printed-board-assembly", (char *)pbanum);
	}

#ifdef DEBUG
	/* Prove at debug time that buf is large enough for each string. */
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver) < sizeof (buf));
#endif

	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-version", buf);
	(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-build", buf);
	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "api-version", buf);

	if (!i40e_set_hw_bus_info(hw))
		return (B_FALSE);

	if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}
1507 1507
1508 1508 static void
1509 1509 i40e_identify_hardware(i40e_t *i40e)
1510 1510 {
1511 1511 i40e_hw_t *hw = &i40e->i40e_hw_space;
1512 1512 struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
1513 1513
1514 1514 hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
1515 1515 hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
1516 1516 hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
1517 1517 PCI_CONF_REVID);
1518 1518 hw->subsystem_device_id =
1519 1519 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
1520 1520 hw->subsystem_vendor_id =
1521 1521 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);
1522 1522
1523 1523 /*
1524 1524 * Note that we set the hardware's bus information later on, in
1525 1525 * i40e_get_available_resources(). The common code doesn't seem to
1526 1526 * require that it be set in any ways, it seems to be mostly for
1527 1527 * book-keeping.
1528 1528 */
1529 1529 }
1530 1530
/*
 * Map the device's main register set (BAR) into kernel virtual address
 * space, recording the mapping in both the common-code hw structure
 * (hw_addr) and our osdep state (handle and size). Uses
 * i40e_regs_acc_attr, whose access flags were chosen by i40e_fm_init().
 */
static boolean_t
i40e_regs_map(i40e_t *i40e)
{
	dev_info_t *devinfo = i40e->i40e_dip;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
	off_t memsize;
	int ret;

	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
	    DDI_SUCCESS) {
		i40e_error(i40e, "Used invalid register set to map PCIe regs");
		return (B_FALSE);
	}

	if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
	    &osdep->ios_reg_handle)) != DDI_SUCCESS) {
		i40e_error(i40e, "failed to map device registers: %d", ret);
		return (B_FALSE);
	}

	osdep->ios_reg_size = memsize;
	return (B_TRUE);
}
1556 1556
1557 1557 /*
1558 1558 * Update parameters required when a new MTU has been configured. Calculate the
1559 1559 * maximum frame size, as well as, size our DMA buffers which we size in
1560 1560 * increments of 1K.
1561 1561 */
1562 1562 void
1563 1563 i40e_update_mtu(i40e_t *i40e)
1564 1564 {
1565 1565 uint32_t rx, tx;
1566 1566
1567 1567 i40e->i40e_frame_max = i40e->i40e_sdu +
1568 1568 sizeof (struct ether_vlan_header) + ETHERFCSL;
1569 1569
1570 1570 rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
1571 1571 i40e->i40e_rx_buf_size = ((rx >> 10) +
1572 1572 ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1573 1573
1574 1574 tx = i40e->i40e_frame_max;
1575 1575 i40e->i40e_tx_buf_size = ((tx >> 10) +
1576 1576 ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1577 1577 }
1578 1578
1579 1579 static int
1580 1580 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
1581 1581 {
1582 1582 int val;
1583 1583
1584 1584 val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
1585 1585 prop, def);
1586 1586 if (val > max)
1587 1587 val = max;
1588 1588 if (val < min)
1589 1589 val = min;
1590 1590 return (val);
1591 1591 }
1592 1592
/*
 * Load all tunable driver properties (from i40e.conf or defaults),
 * clamping each to its supported range, then derive the MTU-dependent
 * sizing via i40e_update_mtu().
 */
static void
i40e_init_properties(i40e_t *i40e)
{
	i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
	    I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);

	/* Allows forcing a specific interrupt type for debugging. */
	i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
	    I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);

	i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	/* Hardware requires descriptor counts in I40E_DESC_ALIGN multiples. */
	i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
	    I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
	    I40E_DEF_TX_RING_SIZE);
	if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
		    I40E_DESC_ALIGN);
	}

	/* Upper bound leaves room for a maximally-fragmented packet. */
	i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
	    I40E_MIN_TX_BLOCK_THRESH,
	    i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
	    I40E_DEF_TX_BLOCK_THRESH);

	i40e->i40e_num_rx_groups = i40e_get_prop(i40e, "rx_num_groups",
	    I40E_MIN_NUM_RX_GROUPS, I40E_MAX_NUM_RX_GROUPS,
	    I40E_DEF_NUM_RX_GROUPS);

	i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
	    I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
	    I40E_DEF_RX_RING_SIZE);
	if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
	    I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
	    I40E_DEF_RX_LIMIT_PER_INTR);

	/* Hardware offload enables (checksum, LSO). */
	i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	/* Bind-vs-copy thresholds for RX/TX buffers. */
	i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
	    I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
	    I40E_DEF_RX_DMA_THRESH);

	i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
	    I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
	    I40E_DEF_TX_DMA_THRESH);

	/* Interrupt throttling rates (ITR) per cause. */
	i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);

	i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);

	i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);

	/* Without multiple rings, fall back to single-queue/group sizing. */
	if (!i40e->i40e_mr_enable) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
		i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
	}

	i40e_update_mtu(i40e);
}
1667 1667
1668 1668 /*
1669 1669 * There are a few constraints on interrupts that we're currently imposing, some
1670 1670 * of which are restrictions from hardware. For a fuller treatment, see
1671 1671 * i40e_intr.c.
1672 1672 *
1673 1673 * Currently, to use MSI-X we require two interrupts be available though in
1674 1674 * theory we should participate in IRM and happily use more interrupts.
1675 1675 *
1676 1676 * Hardware only supports a single MSI being programmed and therefore if we
1677 1677 * don't have MSI-X interrupts available at this time, then we ratchet down the
1678 1678 * number of rings and groups available. Obviously, we only bother with a single
1679 1679 * fixed interrupt.
1680 1680 */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
	int request, count, actual, rc, min;
	uint32_t reg;

	/*
	 * Determine how many vectors to request ('request') and the fewest
	 * we can operate with ('min') for the given interrupt type.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
	case DDI_INTR_TYPE_MSI:
		/* The hardware supports only a single MSI / fixed vector. */
		request = 1;
		min = 1;
		break;
	case DDI_INTR_TYPE_MSIX:
		/*
		 * MSI-X needs at least two vectors: vector 0 for the admin
		 * queue plus at least one for queue pairs.
		 */
		min = 2;
		if (!i40e->i40e_mr_enable) {
			request = 2;
			break;
		}
		/*
		 * Ask the hardware how many MSI-X vectors this PF was
		 * granted.
		 */
		reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
		/*
		 * Should this read fail, we will drop back to using
		 * MSI or fixed interrupts.
		 */
		if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
			ddi_fm_service_impact(i40e->i40e_dip,
			    DDI_SERVICE_DEGRADED);
			return (B_FALSE);
		}
		request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
		request++; /* the register value is n - 1 */
		break;
	default:
		panic("bad interrupt type passed to i40e_alloc_intr_handles: "
		    "%d", intr_type);
	}

	/* Verify the system has enough vectors of this type at all. */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	/* ... and enough of them still available for us to use. */
	rc = ddi_intr_get_navail(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	actual = 0;
	i40e->i40e_intr_count = 0;
	i40e->i40e_intr_count_max = 0;
	i40e->i40e_intr_count_min = 0;

	/*
	 * The handle array is sized for the full request even if fewer are
	 * ultimately allocated; i40e_rem_intrs() frees i40e_intr_size bytes.
	 */
	i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
	ASSERT(i40e->i40e_intr_size != 0);
	i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
	    min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_count = actual;
	i40e->i40e_intr_count_max = request;
	i40e->i40e_intr_count_min = min;

	/* The system may grant fewer vectors than we can operate with. */
	if (actual < min) {
		i40e_log(i40e, "actual (%d) is less than minimum (%d).",
		    actual, min);
		goto alloc_handle_fail;
	}

	/*
	 * Record the priority and capabilities for our first vector. Once
	 * we have it, that's our priority until detach time. Even if we
	 * eventually participate in IRM, our priority shouldn't change.
	 */
	rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt priority failed with %d.", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt capabilities failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_type = intr_type;
	return (B_TRUE);

alloc_handle_fail:

	/* Free any vectors allocated above along with the handle array. */
	i40e_rem_intrs(i40e);
	return (B_FALSE);
}
1787 1787
/*
 * Allocate interrupts for the device, preferring MSI-X, then MSI, then
 * fixed, and derive the number of queue pairs and rx groups from the
 * number of vectors we actually obtained.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int intr_types, rc;
	uint_t max_trqpairs;

	/* Per-TC queue limit differs between the X722 and X710 families. */
	if (i40e_is_x722(i40e)) {
		max_trqpairs = I40E_722_MAX_TC_QUEUES;
	} else {
		max_trqpairs = I40E_710_MAX_TC_QUEUES;
	}

	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
	if (rc != DDI_SUCCESS) {
		i40e_error(i40e, "failed to get supported interrupt types: %d",
		    rc);
		return (B_FALSE);
	}

	i40e->i40e_intr_type = 0;

	/*
	 * We need to determine the number of queue pairs per traffic
	 * class. We only have one traffic class (TC0), so we'll base
	 * this off the number of interrupts provided. Furthermore,
	 * since we only use one traffic class, the number of queues
	 * per traffic class and per VSI are the same.
	 *
	 * i40e_intr_force is a tunable that can pin us to MSI or fixed
	 * interrupts even when MSI-X is supported.
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSIX) &&
	    (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) {
		uint32_t n, qp_cap, num_trqpairs;

		/*
		 * While we want the number of queue pairs to match
		 * the number of interrupts, we must keep stay in
		 * bounds of the maximum number of queues per traffic
		 * class. We subtract one from i40e_intr_count to
		 * account for interrupt zero; which is currently
		 * restricted to admin queue commands and other
		 * interrupt causes.
		 */
		n = MIN(i40e->i40e_intr_count - 1, max_trqpairs);
		ASSERT3U(n, >, 0);

		/*
		 * Round up to the nearest power of two to ensure that
		 * the QBASE aligns with the TC size which must be
		 * programmed as a power of two. See the queue mapping
		 * description in section 7.4.9.5.5.1.
		 *
		 * If i40e_intr_count - 1 is not a power of two then
		 * some queue pairs on the same VSI will have to share
		 * an interrupt.
		 *
		 * We may want to revisit this logic in a future where
		 * we have more interrupts and more VSIs. Otherwise,
		 * each VSI will use as many interrupts as possible.
		 * Using more QPs per VSI means better RSS for each
		 * group, but at the same time may require more
		 * sharing of interrupts across VSIs. This may be a
		 * good candidate for a .conf tunable.
		 */
		n = 0x1 << ddi_fls(n);
		i40e->i40e_num_trqpairs_per_vsi = n;

		/*
		 * Make sure the number of tx/rx qpairs does not exceed
		 * the device's capabilities. If it would, shrink the
		 * number of rx groups (VSIs) rather than the per-VSI
		 * queue count.
		 */
		ASSERT3U(i40e->i40e_num_rx_groups, >, 0);
		qp_cap = MIN(hw->func_caps.num_rx_qp, hw->func_caps.num_tx_qp);
		num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
		    i40e->i40e_num_rx_groups;
		if (num_trqpairs > qp_cap) {
			i40e->i40e_num_rx_groups = MAX(1, qp_cap /
			    i40e->i40e_num_trqpairs_per_vsi);
			num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
			    i40e->i40e_num_rx_groups;
			i40e_log(i40e, "Rx groups restricted to %u",
			    i40e->i40e_num_rx_groups);
		}
		ASSERT3U(num_trqpairs, >, 0);
		i40e->i40e_num_trqpairs = num_trqpairs;
		return (B_TRUE);
	}

	/*
	 * We only use multiple transmit/receive pairs when MSI-X interrupts are
	 * available due to the fact that the device basically only supports a
	 * single MSI interrupt.
	 */
	i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
	i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs;
	i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
			return (B_TRUE);
	}

	/* Last resort: a single fixed (legacy) interrupt. */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
			return (B_TRUE);
	}

	return (B_FALSE);
}
1898 1898
1899 1899 /*
1900 1900 * Map different interrupts to MSI-X vectors.
1901 1901 */
1902 1902 static boolean_t
1903 1903 i40e_map_intrs_to_vectors(i40e_t *i40e)
1904 1904 {
1905 1905 if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
1906 1906 return (B_TRUE);
1907 1907 }
1908 1908
1909 1909 /*
1910 1910 * Each queue pair is mapped to a single interrupt, so
1911 1911 * transmit and receive interrupts for a given queue share the
1912 1912 * same vector. Vector zero is reserved for the admin queue.
1913 1913 */
1914 1914 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1915 1915 uint_t vector = i % (i40e->i40e_intr_count - 1);
1916 1916
1917 1917 i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1;
1918 1918 i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1;
1919 1919 }
1920 1920
1921 1921 return (B_TRUE);
1922 1922 }
1923 1923
/*
 * Register an interrupt handler on every allocated vector, dispatching on
 * the interrupt type that i40e_alloc_intrs() settled on. Returns B_FALSE
 * with no handlers installed on failure.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Each MSI-X vector gets its own handler; the vector index
		 * is passed as the handler's second argument so the handler
		 * can identify its cause.
		 */
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				/* Unwind the handlers added so far. */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		/* MSI and fixed each use a single shared handler. */
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* Cast to pacify lint */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}
1973 1973
1974 1974 /*
1975 1975 * Perform periodic checks. Longer term, we should be thinking about additional
1976 1976 * things here:
1977 1977 *
1978 1978 * o Stall Detection
1979 1979 * o Temperature sensor detection
1980 1980 * o Device resetting
1981 1981 * o Statistics updating to avoid wraparound
1982 1982 */
1983 1983 static void
1984 1984 i40e_timer(void *arg)
1985 1985 {
1986 1986 i40e_t *i40e = arg;
1987 1987
1988 1988 mutex_enter(&i40e->i40e_general_lock);
1989 1989 i40e_link_check(i40e);
1990 1990 mutex_exit(&i40e->i40e_general_lock);
1991 1991 }
1992 1992
1993 1993 /*
1994 1994 * Get the hardware state, and scribble away anything that needs scribbling.
1995 1995 */
static void
i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/* Refresh the firmware's link info cache, then our own link state. */
	(void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_link_check(i40e);

	/*
	 * Try and determine our PHY. Note that we may have to retry to and
	 * delay to detect fiber correctly.
	 */
	rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
	    NULL);
	if (rc == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
		    &i40e->i40e_phy, NULL);
	}

	if (rc != I40E_SUCCESS) {
		if (rc == I40E_ERR_UNKNOWN_PHY) {
			/*
			 * NOTE(review): despite the "not attaching" text,
			 * this function returns void and only logs here; it
			 * does not itself abort the attach. Confirm with the
			 * caller whether that is intended.
			 */
			i40e_error(i40e, "encountered unknown PHY type, "
			    "not attaching.");
		} else {
			i40e_error(i40e, "error getting physical capabilities: "
			    "%d, %d", rc, hw->aq.asq_last_status);
		}
	}

	/* Failures below are logged but are not treated as fatal. */
	rc = i40e_update_link_info(hw);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update link information: %d", rc);
	}

	/*
	 * In general, we don't want to mask off (as in stop from being a cause)
	 * any of the interrupts that the phy might be able to generate.
	 */
	rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update phy link mask: %d", rc);
	}
}
2042 2042
2043 2043 /*
2044 2044 * Go through and re-initialize any existing filters that we may have set up for
2045 2045 * this device. Note that we would only expect them to exist if hardware had
2046 2046 * already been initialized and we had just reset it. While we're not
2047 2047 * implementing this yet, we're keeping this around for when we add reset
2048 2048 * capabilities, so this isn't forgotten.
2049 2049 */
/* ARGSUSED */
static void
i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
{
	/*
	 * Intentionally empty: re-establishing MAC filters after a device
	 * reset is not implemented yet. This stub keeps the call site in
	 * place for when reset support is added.
	 */
}
2055 2055
2056 2056 /*
2057 2057 * Set the properties which have common values across all the VSIs.
2058 2058 * Consult the "Add VSI" command section (7.4.9.5.5.1) for a
2059 2059 * complete description of these properties.
2060 2060 */
static void
i40e_set_shared_vsi_props(i40e_t *i40e,
    struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx)
{
	uint_t tc_queues;
	uint16_t vsi_qp_base;

	/*
	 * It's important that we use bitwise-OR here; callers to this
	 * function might enable other sections before calling this
	 * function.
	 */
	info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
	    I40E_AQ_VSI_PROP_VLAN_VALID);

	/*
	 * Calculate the starting QP index for this VSI. This base is
	 * relative to the PF queue space; so a value of 0 for PF#1
	 * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1.
	 * Each VSI occupies a contiguous run of
	 * i40e_num_trqpairs_per_vsi queue pairs.
	 */
	vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi;
	info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	info->queue_mapping[0] =
	    LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) &
	    I40E_AQ_VSI_QUEUE_MASK);

	/*
	 * tc_queues determines the size of the traffic class, where
	 * the size is 2^^tc_queues to a maximum of 64 for the X710
	 * and 128 for the X722.
	 *
	 * Some examples:
	 *	i40e_num_trqpairs_per_vsi == 1 =>  tc_queues = 0, 2^^0 = 1.
	 *	i40e_num_trqpairs_per_vsi == 7 =>  tc_queues = 3, 2^^3 = 8.
	 *	i40e_num_trqpairs_per_vsi == 8 =>  tc_queues = 3, 2^^3 = 8.
	 *	i40e_num_trqpairs_per_vsi == 9 =>  tc_queues = 4, 2^^4 = 16.
	 *	i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32.
	 *	i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64.
	 */
	tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1);

	/*
	 * The TC queue mapping is in relation to the VSI queue space.
	 * Since we are only using one traffic class (TC0) we always
	 * start at queue offset 0.
	 */
	info->tc_mapping[0] =
	    LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_NUMBER_MASK));

	/*
	 * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode")
	 *
	 *	Allow tagged and untagged packets to be sent to this
	 *	VSI from the host.
	 *
	 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode")
	 *
	 *	Leave the tag on the frame and place no VLAN
	 *	information in the descriptor. We want this mode
	 *	because our MAC layer will take care of the VLAN tag,
	 *	if there is one.
	 */
	info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
}
2129 2129
2130 2130 /*
2131 2131 * Delete the VSI at this index, if one exists. We assume there is no
2132 2132 * action we can take if this command fails but to log the failure.
2133 2133 */
2134 2134 static void
2135 2135 i40e_delete_vsi(i40e_t *i40e, uint_t idx)
2136 2136 {
2137 2137 i40e_hw_t *hw = &i40e->i40e_hw_space;
2138 2138 uint16_t seid = i40e->i40e_vsis[idx].iv_seid;
2139 2139
2140 2140 if (seid != 0) {
2141 2141 int rc;
2142 2142
2143 2143 rc = i40e_aq_delete_element(hw, seid, NULL);
2144 2144
2145 2145 if (rc != I40E_SUCCESS) {
2146 2146 i40e_error(i40e, "Failed to delete VSI %d: %d",
2147 2147 rc, hw->aq.asq_last_status);
2148 2148 }
2149 2149
2150 2150 i40e->i40e_vsis[idx].iv_seid = 0;
2151 2151 }
2152 2152 }
2153 2153
2154 2154 /*
2155 2155 * Add a new VSI.
2156 2156 */
static boolean_t
i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx)
{
	struct i40e_vsi_context ctx;
	i40e_rx_group_t *rxg;
	int rc;

	/*
	 * The default VSI is created by the controller. This function
	 * creates new, non-default VSIs only.
	 */
	ASSERT3U(idx, !=, 0);

	/* Attach the new VSI beneath our virtual switch element. */
	bzero(&ctx, sizeof (struct i40e_vsi_context));
	ctx.uplink_seid = i40e->i40e_veb_seid;
	ctx.pf_num = hw->pf_id;
	ctx.flags = I40E_AQ_VSI_TYPE_PF;
	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	i40e_set_shared_vsi_props(i40e, &ctx.info, idx);

	rc = i40e_aq_add_vsi(hw, &ctx, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc,
		    hw->aq.asq_last_status);
		return (B_FALSE);
	}

	/*
	 * Record the identifiers firmware assigned to the new VSI: its
	 * SEID, VSI number, and statistics counter index.
	 */
	rxg = &i40e->i40e_rx_groups[idx];
	rxg->irg_vsi_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_number = ctx.vsi_number;
	i40e->i40e_vsis[idx].iv_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx);

	if (i40e_stat_vsi_init(i40e, idx) == B_FALSE)
		return (B_FALSE);

	return (B_TRUE);
}
2195 2195
2196 2196 /*
2197 2197 * Configure the hardware for the Default Virtual Station Interface (VSI).
2198 2198 */
2199 2199 static boolean_t
2200 2200 i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw)
2201 2201 {
2202 2202 struct i40e_vsi_context ctx;
2203 2203 i40e_rx_group_t *def_rxg;
2204 2204 int err;
2205 2205 struct i40e_aqc_remove_macvlan_element_data filt;
2206 2206
2207 2207 bzero(&ctx, sizeof (struct i40e_vsi_context));
2208 2208 ctx.seid = I40E_DEF_VSI_SEID(i40e);
2209 2209 ctx.pf_num = hw->pf_id;
2210 2210 err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
2211 2211 if (err != I40E_SUCCESS) {
2212 2212 i40e_error(i40e, "get VSI params failed with %d", err);
2213 2213 return (B_FALSE);
2214 2214 }
2215 2215
2216 2216 ctx.info.valid_sections = 0;
2217 2217 i40e->i40e_vsis[0].iv_number = ctx.vsi_number;
2218 2218 i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
2219 2219 if (i40e_stat_vsi_init(i40e, 0) == B_FALSE)
2220 2220 return (B_FALSE);
2221 2221
2222 2222 i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX);
2223 2223
2224 2224 err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
2225 2225 if (err != I40E_SUCCESS) {
2226 2226 i40e_error(i40e, "Update VSI params failed with %d", err);
2227 2227 return (B_FALSE);
2228 2228 }
2229 2229
2230 2230 def_rxg = &i40e->i40e_rx_groups[0];
2231 2231 def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e);
2232 2232
2233 2233 /*
2234 2234 * We have seen three different behaviors in regards to the
2235 2235 * Default VSI and its implicit L2 MAC+VLAN filter.
2236 2236 *
2237 2237 * 1. It has an implicit filter for the factory MAC address
2238 2238 * and this filter counts against 'ifr_nmacfilt_used'.
2239 2239 *
2240 2240 * 2. It has an implicit filter for the factory MAC address
2241 2241 * and this filter DOES NOT count against 'ifr_nmacfilt_used'.
2242 2242 *
2243 2243 * 3. It DOES NOT have an implicit filter.
2244 2244 *
2245 2245 * All three of these cases are accounted for below. If we
2246 2246 * fail to remove the L2 filter (ENOENT) then we assume there
2247 2247 * wasn't one. Otherwise, if we successfully remove the
2248 2248 * filter, we make sure to update the 'ifr_nmacfilt_used'
2249 2249 * count accordingly.
2250 2250 *
2251 2251 * We remove this filter to prevent duplicate delivery of
2252 2252 * packets destined for the primary MAC address as DLS will
2253 2253 * create the same filter on a non-default VSI for the primary
2254 2254 * MAC client.
2255 2255 *
2256 2256 * If you change the following code please test it across as
2257 2257 * many X700 series controllers and firmware revisions as you
2258 2258 * can.
2259 2259 */
2260 2260 bzero(&filt, sizeof (filt));
2261 2261 bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
2262 2262 filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2263 2263 filt.vlan_tag = 0;
2264 2264
2265 2265 ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
2266 2266 i40e_log(i40e, "Num L2 filters: %u",
2267 2267 i40e->i40e_resources.ifr_nmacfilt_used);
2268 2268
2269 2269 err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
2270 2270 NULL);
2271 2271 if (err == I40E_SUCCESS) {
2272 2272 i40e_log(i40e,
2273 2273 "Removed L2 filter from Default VSI with SEID %u",
2274 2274 I40E_DEF_VSI_SEID(i40e));
2275 2275 } else if (hw->aq.asq_last_status == ENOENT) {
2276 2276 i40e_log(i40e,
|
↓ open down ↓ |
1887 lines elided |
↑ open up ↑ |
2277 2277 "No L2 filter for Default VSI with SEID %u",
2278 2278 I40E_DEF_VSI_SEID(i40e));
2279 2279 } else {
2280 2280 i40e_error(i40e, "Failed to remove L2 filter from"
2281 2281 " Default VSI with SEID %u: %d (%d)",
2282 2282 I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
2283 2283
2284 2284 return (B_FALSE);
2285 2285 }
2286 2286
2287 +#if 0
2288 + bzero(&filt, sizeof (filt));
2289 + bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
2290 + filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2291 + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2292 + filt.vlan_tag = 0;
2293 +
2294 + ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
2295 + i40e_log(i40e, "Num L2 filters (2nd try): %u",
2296 + i40e->i40e_resources.ifr_nmacfilt_used);
2297 +
2298 + err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
2299 + NULL);
2300 + if (err == I40E_SUCCESS) {
2301 + i40e_log(i40e,
2302 + "(2nd try) Removed L2 filter from Default VSI with SEID %u",
2303 + I40E_DEF_VSI_SEID(i40e));
2304 + } else if (hw->aq.asq_last_status == ENOENT) {
2305 + i40e_log(i40e,
2306 + "(2nd try) No L2 filter for Default VSI with SEID %u",
2307 + I40E_DEF_VSI_SEID(i40e));
2308 + } else {
2309 + i40e_error(i40e, "(2nd try) Failed to remove L2 filter from"
2310 + " Default VSI with SEID %u: %d (%d)",
2311 + I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
2312 +
2313 + return (B_FALSE);
2314 + }
2315 +#endif
2287 2316 /*
2288 2317 * As mentioned above, the controller created an implicit L2
2289 2318 * filter for the primary MAC. We want to remove both the
2290 2319 * filter and decrement the filter count. However, not all
2291 2320 * controllers count this implicit filter against the total
2292 2321 * MAC filter count. So here we are making sure it is either
2293 2322 * one or zero. If it is one, then we know it is for the
2294 2323 * implicit filter and we should decrement since we just
2295 2324 * removed the filter above. If it is zero then we know the
2296 2325 * controller that does not count the implicit filter, and it
2297 2326 * was enough to just remove it; we leave the count alone.
2298 2327 * But if it is neither, then we have never seen a controller
2299 2328 * like this before and we should fail to attach.
2300 2329 *
2301 2330 * It is unfortunate that this code must exist but the
2302 2331 * behavior of this implicit L2 filter and its corresponding
2303 2332 * count were dicovered through empirical testing. The
2304 2333 * programming manuals hint at this filter but do not
2305 2334 * explicitly call out the exact behavior.
2306 2335 */
2307 2336 if (i40e->i40e_resources.ifr_nmacfilt_used == 1) {
2308 2337 i40e->i40e_resources.ifr_nmacfilt_used--;
2309 2338 } else {
2310 2339 if (i40e->i40e_resources.ifr_nmacfilt_used != 0) {
2311 2340 i40e_error(i40e, "Unexpected L2 filter count: %u"
2312 2341 " (expected 0)",
2313 2342 i40e->i40e_resources.ifr_nmacfilt_used);
2314 2343 return (B_FALSE);
2315 2344 }
2316 2345 }
2317 2346
2318 2347 return (B_TRUE);
2319 2348 }
2320 2349
/*
 * Program a random RSS hash key for each VSI on the X722, which takes its
 * key per-VSI through the admin queue rather than per-PF registers.
 */
static boolean_t
i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw)
{
	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
		struct i40e_aqc_get_set_rss_key_data key;
		const char *u8seed;
		enum i40e_status_code status;
		uint16_t vsi_number = i40e->i40e_vsis[i].iv_number;

		(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
		u8seed = (char *)seed;

		/* The AQ key structure must be able to hold the whole seed. */
		CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) +
		    sizeof (key.extended_hash_key)));

		/*
		 * Split the seed across the standard and extended key
		 * fields of the admin queue command.
		 */
		bcopy(u8seed, key.standard_rss_key,
		    sizeof (key.standard_rss_key));
		bcopy(&u8seed[sizeof (key.standard_rss_key)],
		    key.extended_hash_key, sizeof (key.extended_hash_key));

		/* A valid VSI number is never zero at this point. */
		ASSERT3U(vsi_number, !=, 0);
		status = i40e_aq_set_rss_key(hw, vsi_number, &key);

		if (status != I40E_SUCCESS) {
			i40e_error(i40e, "failed to set RSS key for VSI %u: %d",
			    vsi_number, status);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}
2354 2383
2355 2384 /*
2356 2385 * Configure the RSS key. For the X710 controller family, this is set on a
2357 2386 * per-PF basis via registers. For the X722, this is done on a per-VSI basis
2358 2387 * through the admin queue.
2359 2388 */
2360 2389 static boolean_t
2361 2390 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw)
2362 2391 {
2363 2392 if (i40e_is_x722(i40e)) {
2364 2393 if (!i40e_config_rss_key_x722(i40e, hw))
2365 2394 return (B_FALSE);
2366 2395 } else {
2367 2396 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2368 2397
2369 2398 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2370 2399 for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
2371 2400 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]);
2372 2401 }
2373 2402
2374 2403 return (B_TRUE);
2375 2404 }
2376 2405
2377 2406 /*
2378 2407 * Populate the LUT. The size of each entry in the LUT depends on the controller
2379 2408 * family, with the X722 using a known 7-bit width. On the X710 controller, this
2380 2409 * is programmed through its control registers where as on the X722 this is
2381 2410 * configured through the admin queue. Also of note, the X722 allows the LUT to
2382 2411 * be set on a per-PF or VSI basis. At this time we use the PF setting. If we
2383 2412 * decide to use the per-VSI LUT in the future, then we will need to modify the
2384 2413 * i40e_add_vsi() function to set the RSS LUT bits in the queueing section.
2385 2414 *
2386 2415 * We populate the LUT in a round robin fashion with the rx queue indices from 0
2387 2416 * to i40e_num_trqpairs_per_vsi - 1.
2388 2417 */
2389 2418 static boolean_t
2390 2419 i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw)
2391 2420 {
2392 2421 uint32_t *hlut;
2393 2422 uint8_t lut_mask;
2394 2423 uint_t i;
2395 2424 boolean_t ret = B_FALSE;
2396 2425
2397 2426 /*
2398 2427 * We always configure the PF with a table size of 512 bytes in
2399 2428 * i40e_chip_start().
2400 2429 */
2401 2430 hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP);
2402 2431 if (hlut == NULL) {
2403 2432 i40e_error(i40e, "i40e_config_rss() buffer allocation failed");
2404 2433 return (B_FALSE);
2405 2434 }
2406 2435
2407 2436 /*
2408 2437 * The width of the X722 is apparently defined to be 7 bits, regardless
2409 2438 * of the capability.
2410 2439 */
2411 2440 if (i40e_is_x722(i40e)) {
2412 2441 lut_mask = (1 << 7) - 1;
2413 2442 } else {
2414 2443 lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;
2415 2444 }
2416 2445
2417 2446 for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) {
2418 2447 ((uint8_t *)hlut)[i] =
2419 2448 (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask;
2420 2449 }
2421 2450
2422 2451 if (i40e_is_x722(i40e)) {
2423 2452 enum i40e_status_code status;
2424 2453
2425 2454 status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut,
2426 2455 I40E_HLUT_TABLE_SIZE);
2427 2456
2428 2457 if (status != I40E_SUCCESS) {
2429 2458 i40e_error(i40e, "failed to set RSS LUT %d: %d",
2430 2459 status, hw->aq.asq_last_status);
2431 2460 goto out;
2432 2461 }
2433 2462 } else {
2434 2463 for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) {
2435 2464 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);
2436 2465 }
2437 2466 }
2438 2467 ret = B_TRUE;
2439 2468 out:
2440 2469 kmem_free(hlut, I40E_HLUT_TABLE_SIZE);
2441 2470 return (ret);
2442 2471 }
2443 2472
2444 2473 /*
2445 2474 * Set up RSS.
2446 2475 * 1. Seed the hash key.
2447 2476 * 2. Enable PCTYPEs for the hash filter.
2448 2477 * 3. Populate the LUT.
2449 2478 */
static boolean_t
i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
{
	uint64_t hena;

	/*
	 * 1. Seed the hash key
	 */
	if (!i40e_config_rss_key(i40e, hw))
		return (B_FALSE);

	/*
	 * 2. Configure PCTYPES
	 *
	 * Each bit of the hash-enable (HENA) mask turns on RSS hashing
	 * for one packet classification type.
	 */
	hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	/*
	 * Add additional types supported by the X722 controller.
	 */
	if (i40e_is_x722(i40e)) {
		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
	}

	/* The 64-bit HENA mask is split across two 32-bit registers. */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));

	/*
	 * 3. Populate LUT
	 */
	return (i40e_config_rss_hlut(i40e, hw));
}
2496 2525
2497 2526 /*
2498 2527 * Wrapper to kick the chipset on.
2499 2528 */
2500 2529 static boolean_t
2501 2530 i40e_chip_start(i40e_t *i40e)
2502 2531 {
2503 2532 i40e_hw_t *hw = &i40e->i40e_hw_space;
2504 2533 struct i40e_filter_control_settings filter;
2505 2534 int rc;
2506 2535 uint8_t err;
2507 2536
2508 2537 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
2509 2538 (hw->aq.fw_maj_ver < 4)) {
2510 2539 i40e_msec_delay(75);
2511 2540 if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
2512 2541 I40E_SUCCESS) {
2513 2542 i40e_error(i40e, "failed to restart link: admin queue "
2514 2543 "error: %d", hw->aq.asq_last_status);
2515 2544 return (B_FALSE);
2516 2545 }
2517 2546 }
2518 2547
2519 2548 /* Determine hardware state */
2520 2549 i40e_get_hw_state(i40e, hw);
2521 2550
2522 2551 /* For now, we always disable Ethernet Flow Control. */
2523 2552 hw->fc.requested_mode = I40E_FC_NONE;
2524 2553 rc = i40e_set_fc(hw, &err, B_TRUE);
2525 2554 if (rc != I40E_SUCCESS) {
2526 2555 i40e_error(i40e, "Setting flow control failed, returned %d"
2527 2556 " with error: 0x%x", rc, err);
2528 2557 return (B_FALSE);
2529 2558 }
2530 2559
2531 2560 /* Initialize mac addresses. */
2532 2561 i40e_init_macaddrs(i40e, hw);
2533 2562
2534 2563 /*
2535 2564 * Set up the filter control. If the hash lut size is changed from
2536 2565 * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and
2537 2566 * i40e_config_rss_hlut() will need to be updated.
2538 2567 */
2539 2568 bzero(&filter, sizeof (filter));
2540 2569 filter.enable_ethtype = TRUE;
2541 2570 filter.enable_macvlan = TRUE;
2542 2571 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
2543 2572
2544 2573 rc = i40e_set_filter_control(hw, &filter);
2545 2574 if (rc != I40E_SUCCESS) {
2546 2575 i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
2547 2576 return (B_FALSE);
2548 2577 }
2549 2578
2550 2579 i40e_intr_chip_init(i40e);
2551 2580
2552 2581 rc = i40e_get_mac_seid(i40e);
2553 2582 if (rc == -1) {
2554 2583 i40e_error(i40e, "failed to obtain MAC Uplink SEID");
2555 2584 return (B_FALSE);
2556 2585 }
2557 2586 i40e->i40e_mac_seid = (uint16_t)rc;
2558 2587
2559 2588 /*
2560 2589 * Create a VEB in order to support multiple VSIs. Each VSI
2561 2590 * functions as a MAC group. This call sets the PF's MAC as
2562 2591 * the uplink port and the PF's default VSI as the default
2563 2592 * downlink port.
2564 2593 */
2565 2594 rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e),
2566 2595 0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL);
2567 2596 if (rc != I40E_SUCCESS) {
2568 2597 i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc,
2569 2598 hw->aq.asq_last_status);
2570 2599 return (B_FALSE);
2571 2600 }
2572 2601
2573 2602 if (!i40e_config_def_vsi(i40e, hw))
2574 2603 return (B_FALSE);
2575 2604
2576 2605 for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) {
2577 2606 if (!i40e_add_vsi(i40e, hw, i))
2578 2607 return (B_FALSE);
2579 2608 }
2580 2609
2581 2610 if (!i40e_config_rss(i40e, hw))
2582 2611 return (B_FALSE);
2583 2612
2584 2613 i40e_flush(hw);
2585 2614
2586 2615 return (B_TRUE);
2587 2616 }
2588 2617
2589 2618 /*
2590 2619 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
2591 2620 */
2592 2621 static void
2593 2622 i40e_shutdown_rx_ring(i40e_trqpair_t *itrq)
2594 2623 {
2595 2624 i40e_t *i40e = itrq->itrq_i40e;
2596 2625 i40e_hw_t *hw = &i40e->i40e_hw_space;
2597 2626 uint32_t reg;
2598 2627
2599 2628 /*
2600 2629 * Step 1. 8.3.3.1.2 suggests the interrupt is removed from the
2601 2630 * hardware interrupt linked list (see i40e_intr.c) but for
2602 2631 * simplicity we keep this list immutable until the device
2603 2632 * (distinct from an individual ring) is stopped.
2604 2633 */
2605 2634
2606 2635 /*
2607 2636 * Step 2. Request the queue by clearing QENA_REQ. It may not be
2608 2637 * set due to unwinding from failures and a partially enabled
2609 2638 * ring set.
2610 2639 */
2611 2640 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2612 2641 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
2613 2642 return;
2614 2643 VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
2615 2644 I40E_QRX_ENA_QENA_REQ_MASK);
2616 2645 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2617 2646 I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);
2618 2647
2619 2648 /*
2620 2649 * Step 3. Wait for the disable to take, by having QENA_STAT in the FPM
2621 2650 * be cleared. Note that we could still receive data in the queue during
2622 2651 * this time. We don't actually wait for this now and instead defer this
2623 2652 * to i40e_shutdown_ring_wait(), after we've interleaved disabling the
2624 2653 * TX queue as well.
2625 2654 */
2626 2655 }
2627 2656
2628 2657 static void
2629 2658 i40e_shutdown_tx_ring(i40e_trqpair_t *itrq)
2630 2659 {
2631 2660 i40e_t *i40e = itrq->itrq_i40e;
2632 2661 i40e_hw_t *hw = &i40e->i40e_hw_space;
2633 2662 uint32_t reg;
2634 2663
2635 2664 /*
2636 2665 * Step 2. Set the SET_QDIS flag for the queue.
2637 2666 */
2638 2667 i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_FALSE);
2639 2668
2640 2669 /*
2641 2670 * Step 3. Wait at least 400 usec.
2642 2671 */
2643 2672 drv_usecwait(500);
2644 2673
2645 2674 /*
2646 2675 * Step 4. Clear the QENA_REQ flag which tells hardware to
2647 2676 * quiesce. If QENA_REQ is not already set then that means that
2648 2677 * we likely already tried to disable this queue.
2649 2678 */
2650 2679 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
2651 2680 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) != 0) {
2652 2681 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2653 2682 I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
2654 2683 }
2655 2684
2656 2685 /*
2657 2686 * Step 5. Wait for the drain to finish. This will be done by the
2658 2687 * hardware removing the QENA_STAT flag from the queue. Rather than
2659 2688 * waiting here, we interleave it with the receive shutdown in
2660 2689 * i40e_shutdown_ring_wait().
2661 2690 */
2662 2691 }
2663 2692
2664 2693 /*
2665 2694 * Wait for a ring to be shut down. e.g. Steps 2 and 5 from the above
2666 2695 * functions.
2667 2696 */
2668 2697 static boolean_t
2669 2698 i40e_shutdown_ring_wait(i40e_trqpair_t *itrq)
2670 2699 {
2671 2700 i40e_t *i40e = itrq->itrq_i40e;
2672 2701 i40e_hw_t *hw = &i40e->i40e_hw_space;
2673 2702 uint32_t reg;
2674 2703 int try;
2675 2704
2676 2705 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2677 2706 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2678 2707 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2679 2708 break;
2680 2709 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2681 2710 }
2682 2711
2683 2712 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
2684 2713 i40e_error(i40e, "timed out disabling rx queue %d",
2685 2714 itrq->itrq_index);
2686 2715 return (B_FALSE);
2687 2716 }
2688 2717
2689 2718 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2690 2719 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
2691 2720 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2692 2721 break;
2693 2722 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2694 2723 }
2695 2724
2696 2725 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
2697 2726 i40e_error(i40e, "timed out disabling tx queue %d",
2698 2727 itrq->itrq_index);
2699 2728 return (B_FALSE);
2700 2729 }
2701 2730
2702 2731 return (B_TRUE);
2703 2732 }
2704 2733
2705 2734
2706 2735 /*
2707 2736 * Shutdown an individual ring and release any memory.
2708 2737 */
2709 2738 boolean_t
2710 2739 i40e_shutdown_ring(i40e_trqpair_t *itrq)
2711 2740 {
2712 2741 boolean_t rv = B_TRUE;
2713 2742
2714 2743 /*
2715 2744 * Tell transmit path to quiesce, and wait until done.
2716 2745 */
2717 2746 if (i40e_ring_tx_quiesce(itrq)) {
2718 2747 /* Already quiesced. */
2719 2748 return (B_TRUE);
2720 2749 }
2721 2750
2722 2751 i40e_shutdown_rx_ring(itrq);
2723 2752 i40e_shutdown_tx_ring(itrq);
2724 2753 if (!i40e_shutdown_ring_wait(itrq))
2725 2754 rv = B_FALSE;
2726 2755
2727 2756 /*
2728 2757 * After the ring has stopped, we need to wait 50ms before
2729 2758 * programming it again. Rather than wait here, we'll record
2730 2759 * the time the ring was stopped. When the ring is started, we'll
2731 2760 * check if enough time has expired and then wait if necessary.
2732 2761 */
2733 2762 itrq->irtq_time_stopped = gethrtime();
2734 2763
2735 2764 /*
2736 2765 * The rings have been stopped in the hardware, now wait for
2737 2766 * a possibly active interrupt thread.
2738 2767 */
2739 2768 i40e_intr_quiesce(itrq);
2740 2769
2741 2770 mutex_enter(&itrq->itrq_tx_lock);
2742 2771 i40e_tx_cleanup_ring(itrq);
2743 2772 mutex_exit(&itrq->itrq_tx_lock);
2744 2773
2745 2774 i40e_free_ring_mem(itrq, B_FALSE);
2746 2775
2747 2776 return (rv);
2748 2777 }
2749 2778
2750 2779 /*
2751 2780 * Shutdown all the rings.
2752 2781 * Called from i40e_stop(), and hopefully the mac layer has already
2753 2782 * called ring stop for each ring, which would make this almost a no-op.
2754 2783 */
2755 2784 static boolean_t
2756 2785 i40e_shutdown_rings(i40e_t *i40e)
2757 2786 {
2758 2787 boolean_t rv = B_TRUE;
2759 2788 int i;
2760 2789
2761 2790 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2762 2791 if (!i40e_shutdown_ring(&i40e->i40e_trqpairs[i]))
2763 2792 rv = B_FALSE;
2764 2793 }
2765 2794
2766 2795 return (rv);
2767 2796 }
2768 2797
2769 2798 static void
2770 2799 i40e_setup_rx_descs(i40e_trqpair_t *itrq)
2771 2800 {
2772 2801 int i;
2773 2802 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2774 2803
2775 2804 for (i = 0; i < rxd->rxd_ring_size; i++) {
2776 2805 i40e_rx_control_block_t *rcb;
2777 2806 i40e_rx_desc_t *rdesc;
2778 2807
2779 2808 rcb = rxd->rxd_work_list[i];
2780 2809 rdesc = &rxd->rxd_desc_ring[i];
2781 2810
2782 2811 rdesc->read.pkt_addr =
2783 2812 CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
2784 2813 rdesc->read.hdr_addr = 0;
2785 2814 }
2786 2815 }
2787 2816
2788 2817 static boolean_t
2789 2818 i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
2790 2819 {
2791 2820 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2792 2821 i40e_t *i40e = itrq->itrq_i40e;
2793 2822 i40e_hw_t *hw = &i40e->i40e_hw_space;
2794 2823
2795 2824 struct i40e_hmc_obj_rxq rctx;
2796 2825 int err;
2797 2826
2798 2827 bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
2799 2828 rctx.base = rxd->rxd_desc_area.dmab_dma_address /
2800 2829 I40E_HMC_RX_CTX_UNIT;
2801 2830 rctx.qlen = rxd->rxd_ring_size;
2802 2831 VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
2803 2832 VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
2804 2833 rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
2805 2834 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2806 2835 rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
2807 2836 rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
2808 2837 rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
2809 2838 rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
2810 2839 rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
2811 2840 rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
2812 2841 rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
2813 2842 rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
2814 2843 rctx.rxmax = i40e->i40e_frame_max;
2815 2844 rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
2816 2845 rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
2817 2846 rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
2818 2847 rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
2819 2848 rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;
2820 2849
2821 2850 /*
2822 2851 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
2823 2852 */
2824 2853 rctx.prefena = I40E_HMC_RX_PREFENA;
2825 2854
2826 2855 err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
2827 2856 if (err != I40E_SUCCESS) {
2828 2857 i40e_error(i40e, "failed to clear rx queue %d context: %d",
2829 2858 itrq->itrq_index, err);
2830 2859 return (B_FALSE);
2831 2860 }
2832 2861
2833 2862 err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
2834 2863 if (err != I40E_SUCCESS) {
2835 2864 i40e_error(i40e, "failed to set rx queue %d context: %d",
2836 2865 itrq->itrq_index, err);
2837 2866 return (B_FALSE);
2838 2867 }
2839 2868
2840 2869 return (B_TRUE);
2841 2870 }
2842 2871
2843 2872 /*
2844 2873 * Take care of setting up the descriptor ring and actually programming the
2845 2874 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
2846 2875 * rx rings.
2847 2876 */
2848 2877 static boolean_t
2849 2878 i40e_setup_rx_ring(i40e_trqpair_t *itrq)
2850 2879 {
2851 2880 i40e_t *i40e = itrq->itrq_i40e;
2852 2881 i40e_hw_t *hw = &i40e->i40e_hw_space;
2853 2882 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2854 2883 uint32_t reg;
2855 2884 int i;
2856 2885
2857 2886 /*
2858 2887 * Step 1. Program all receive ring descriptors.
2859 2888 */
2860 2889 i40e_setup_rx_descs(itrq);
2861 2890
2862 2891 /*
2863 2892 * Step 2. Program the queue's FPM/HMC context.
2864 2893 */
2865 2894 if (!i40e_setup_rx_hmc(itrq))
2866 2895 return (B_FALSE);
2867 2896
2868 2897 /*
2869 2898 * Step 3. Clear the queue's tail pointer and set it to the end
2870 2899 * of the space.
2871 2900 */
2872 2901 I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index), 0);
2873 2902 I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index),
2874 2903 rxd->rxd_ring_size - 1);
2875 2904
2876 2905 /*
2877 2906 * Step 4. Enable the queue via the QENA_REQ.
2878 2907 */
2879 2908 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2880 2909 VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
2881 2910 I40E_QRX_ENA_QENA_STAT_MASK));
2882 2911 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2883 2912 I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);
2884 2913
2885 2914 /*
2886 2915 * Step 5. Verify that QENA_STAT has been set. It's promised
2887 2916 * that this should occur within about 10 us, but like other
2888 2917 * systems, we give the card a bit more time.
2889 2918 */
2890 2919 for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
2891 2920 reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2892 2921
2893 2922 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2894 2923 break;
2895 2924 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2896 2925 }
2897 2926
2898 2927 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2899 2928 i40e_error(i40e, "failed to enable rx queue %d, timed "
2900 2929 "out.", itrq->itrq_index);
2901 2930 return (B_FALSE);
2902 2931 }
2903 2932
2904 2933 return (B_TRUE);
2905 2934 }
2906 2935
2907 2936 static boolean_t
2908 2937 i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
2909 2938 {
2910 2939 i40e_t *i40e = itrq->itrq_i40e;
2911 2940 i40e_hw_t *hw = &i40e->i40e_hw_space;
2912 2941
2913 2942 struct i40e_hmc_obj_txq tctx;
2914 2943 struct i40e_vsi_context context;
2915 2944 int err;
2916 2945
2917 2946 bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
2918 2947 tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
2919 2948 tctx.base = itrq->itrq_desc_area.dmab_dma_address /
2920 2949 I40E_HMC_TX_CTX_UNIT;
2921 2950 tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
2922 2951 tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
2923 2952 tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
2924 2953 tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
2925 2954 tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
2926 2955 tctx.qlen = itrq->itrq_tx_ring_size;
2927 2956 tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
2928 2957 tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
2929 2958 tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
2930 2959 tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
2931 2960 sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;
2932 2961
2933 2962 /*
2934 2963 * This field isn't actually documented, like crc, but it suggests that
2935 2964 * it should be zeroed. We leave both of these here because of that for
2936 2965 * now. We should check with Intel on why these are here even.
2937 2966 */
2938 2967 tctx.crc = 0;
2939 2968 tctx.rdylist_act = 0;
2940 2969
2941 2970 /*
2942 2971 * We're supposed to assign the rdylist field with the value of the
2943 2972 * traffic class index for the first device. We query the VSI parameters
2944 2973 * again to get what the handle is. Note that every queue is always
2945 2974 * assigned to traffic class zero, because we don't actually use them.
2946 2975 */
2947 2976 bzero(&context, sizeof (struct i40e_vsi_context));
2948 2977 context.seid = I40E_DEF_VSI_SEID(i40e);
2949 2978 context.pf_num = hw->pf_id;
2950 2979 err = i40e_aq_get_vsi_params(hw, &context, NULL);
2951 2980 if (err != I40E_SUCCESS) {
2952 2981 i40e_error(i40e, "get VSI params failed with %d", err);
2953 2982 return (B_FALSE);
2954 2983 }
2955 2984 tctx.rdylist = LE_16(context.info.qs_handle[0]);
2956 2985
2957 2986 err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
2958 2987 if (err != I40E_SUCCESS) {
2959 2988 i40e_error(i40e, "failed to clear tx queue %d context: %d",
2960 2989 itrq->itrq_index, err);
2961 2990 return (B_FALSE);
2962 2991 }
2963 2992
2964 2993 err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
2965 2994 if (err != I40E_SUCCESS) {
2966 2995 i40e_error(i40e, "failed to set tx queue %d context: %d",
2967 2996 itrq->itrq_index, err);
2968 2997 return (B_FALSE);
2969 2998 }
2970 2999
2971 3000 return (B_TRUE);
2972 3001 }
2973 3002
2974 3003 /*
2975 3004 * Take care of setting up the descriptor ring and actually programming the
2976 3005 * device. See 8.4.3.1.1 for what we need to do here.
2977 3006 */
2978 3007 static boolean_t
2979 3008 i40e_setup_tx_ring(i40e_trqpair_t *itrq)
2980 3009 {
2981 3010 i40e_t *i40e = itrq->itrq_i40e;
2982 3011 i40e_hw_t *hw = &i40e->i40e_hw_space;
2983 3012 uint32_t reg;
2984 3013 int i;
2985 3014
2986 3015 /*
2987 3016 * Step 1. Clear the queue disable flag and verify that the
2988 3017 * index is set correctly.
2989 3018 */
2990 3019 i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_TRUE);
2991 3020
2992 3021 /*
2993 3022 * Step 2. Prepare the queue's FPM/HMC context.
2994 3023 */
2995 3024 if (!i40e_setup_tx_hmc(itrq))
2996 3025 return (B_FALSE);
2997 3026
2998 3027 /*
2999 3028 * Step 3. Verify that it's clear that this PF owns this queue.
3000 3029 */
3001 3030 reg = I40E_QTX_CTL_PF_QUEUE;
3002 3031 reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3003 3032 I40E_QTX_CTL_PF_INDX_MASK;
3004 3033 I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
3005 3034 i40e_flush(hw);
3006 3035
3007 3036 /*
3008 3037 * Step 4. Set the QENA_REQ flag.
3009 3038 */
3010 3039 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3011 3040 VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
3012 3041 I40E_QTX_ENA_QENA_STAT_MASK));
3013 3042 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3014 3043 I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
3015 3044
3016 3045 /*
3017 3046 * Step 5. Verify that QENA_STAT has been set. It's promised
3018 3047 * that this should occur within about 10 us, but like BSD,
3019 3048 * we'll try for up to 100 ms for this queue.
3020 3049 */
3021 3050 for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
3022 3051 reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3023 3052
3024 3053 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3025 3054 break;
3026 3055 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
3027 3056 }
3028 3057
3029 3058 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3030 3059 i40e_error(i40e, "failed to enable tx queue %d, timed "
3031 3060 "out", itrq->itrq_index);
3032 3061 return (B_FALSE);
3033 3062 }
3034 3063
3035 3064 return (B_TRUE);
3036 3065 }
3037 3066
3038 3067 int
3039 3068 i40e_setup_ring(i40e_trqpair_t *itrq)
3040 3069 {
3041 3070 i40e_t *i40e = itrq->itrq_i40e;
3042 3071 hrtime_t now, gap;
3043 3072
3044 3073 if (!i40e_alloc_ring_mem(itrq)) {
3045 3074 i40e_error(i40e, "Failed to allocate ring memory");
3046 3075 return (ENOMEM);
3047 3076 }
3048 3077
3049 3078 /*
3050 3079 * 8.3.3.1.1 Receive Queue Enable Flow states software should
3051 3080 * wait at least 50ms between ring disable and enable. See how
3052 3081 * long we need to wait, and wait only if required.
3053 3082 */
3054 3083 now = gethrtime();
3055 3084 gap = NSEC2MSEC(now - itrq->irtq_time_stopped);
3056 3085 if (gap < I40E_RING_ENABLE_GAP && gap != 0)
3057 3086 delay(drv_usectohz(gap * 1000));
3058 3087
3059 3088 mutex_enter(&itrq->itrq_intr_lock);
3060 3089 if (!i40e_setup_rx_ring(itrq))
3061 3090 goto failed;
3062 3091
3063 3092 if (!i40e_setup_tx_ring(itrq))
3064 3093 goto failed;
3065 3094
3066 3095 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
3067 3096 DDI_FM_OK)
3068 3097 goto failed;
3069 3098
3070 3099 itrq->itrq_intr_quiesce = B_FALSE;
3071 3100 mutex_exit(&itrq->itrq_intr_lock);
3072 3101
3073 3102 mutex_enter(&itrq->itrq_tx_lock);
3074 3103 itrq->itrq_tx_quiesce = B_FALSE;
3075 3104 mutex_exit(&itrq->itrq_tx_lock);
3076 3105
3077 3106 return (0);
3078 3107
3079 3108 failed:
3080 3109 mutex_exit(&itrq->itrq_intr_lock);
3081 3110 i40e_free_ring_mem(itrq, B_TRUE);
3082 3111 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3083 3112
3084 3113 return (EIO);
3085 3114 }
3086 3115
3087 3116 void
3088 3117 i40e_stop(i40e_t *i40e)
3089 3118 {
3090 3119 uint_t i;
3091 3120 i40e_hw_t *hw = &i40e->i40e_hw_space;
3092 3121
3093 3122 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
3094 3123
3095 3124 /*
3096 3125 * Shutdown and drain the tx and rx pipeline. We do this using the
3097 3126 * following steps.
3098 3127 *
3099 3128 * 1) Shutdown interrupts to all the queues (trying to keep the admin
3100 3129 * queue alive).
3101 3130 *
3102 3131 * 2) Remove all of the interrupt tx and rx causes by setting the
3103 3132 * interrupt linked lists to zero.
3104 3133 *
3105 3134 * 2) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
3106 3135 * wait for all the queues to be disabled, once we reach that point
3107 3136 * it should be safe to free associated data.
3108 3137 *
3109 3138 * 4) Wait 50ms after all that is done. This ensures that the rings are
3110 3139 * ready for programming again and we don't have to think about this
3111 3140 * in other parts of the driver.
3112 3141 *
3113 3142 * 5) Disable remaining chip interrupts, (admin queue, etc.)
3114 3143 *
3115 3144 * 6) Verify that FM is happy with all the register accesses we
3116 3145 * performed.
3117 3146 */
3118 3147 i40e_intr_io_disable_all(i40e);
3119 3148 i40e_intr_io_clear_cause(i40e);
3120 3149
3121 3150 if (!i40e_shutdown_rings(i40e))
3122 3151 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3123 3152
3124 3153 /*
3125 3154 * We don't delete the default VSI because it replaces the VEB
3126 3155 * after VEB deletion (see the "Delete Element" section).
3127 3156 * Furthermore, since the default VSI is provided by the
3128 3157 * firmware, we never attempt to delete it.
3129 3158 */
3130 3159 for (i = 1; i < i40e->i40e_num_rx_groups; i++) {
3131 3160 i40e_delete_vsi(i40e, i);
3132 3161 }
3133 3162
3134 3163 if (i40e->i40e_veb_seid != 0) {
3135 3164 int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL);
3136 3165
3137 3166 if (rc != I40E_SUCCESS) {
3138 3167 i40e_error(i40e, "Failed to delete VEB %d: %d", rc,
3139 3168 hw->aq.asq_last_status);
3140 3169 }
3141 3170
3142 3171 i40e->i40e_veb_seid = 0;
3143 3172 }
3144 3173
3145 3174 i40e_intr_chip_fini(i40e);
3146 3175
3147 3176 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
3148 3177 DDI_FM_OK) {
3149 3178 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3150 3179 }
3151 3180
3152 3181 for (i = 0; i < i40e->i40e_num_rx_groups; i++) {
3153 3182 i40e_stat_vsi_fini(i40e, i);
3154 3183 }
3155 3184
3156 3185 i40e->i40e_link_speed = 0;
3157 3186 i40e->i40e_link_duplex = 0;
3158 3187 i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
3159 3188 }
3160 3189
3161 3190 boolean_t
3162 3191 i40e_start(i40e_t *i40e)
3163 3192 {
3164 3193 i40e_hw_t *hw = &i40e->i40e_hw_space;
3165 3194 boolean_t rc = B_TRUE;
3166 3195 int err;
3167 3196
3168 3197 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
3169 3198
3170 3199 if (!i40e_chip_start(i40e)) {
3171 3200 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
3172 3201 rc = B_FALSE;
3173 3202 goto done;
3174 3203 }
3175 3204
3176 3205 /*
3177 3206 * Enable broadcast traffic; however, do not enable multicast traffic.
3178 3207 * That's handle exclusively through MAC's mc_multicst routines.
3179 3208 */
3180 3209 err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE,
3181 3210 NULL);
3182 3211 if (err != I40E_SUCCESS) {
3183 3212 i40e_error(i40e, "failed to set default VSI: %d", err);
3184 3213 rc = B_FALSE;
3185 3214 goto done;
3186 3215 }
3187 3216
3188 3217 err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0,
3189 3218 B_FALSE, NULL);
3190 3219 if (err != I40E_SUCCESS) {
3191 3220 i40e_error(i40e, "failed to set MAC config: %d", err);
3192 3221 rc = B_FALSE;
3193 3222 goto done;
3194 3223 }
3195 3224
3196 3225 /*
3197 3226 * Finally, make sure that we're happy from an FM perspective.
3198 3227 */
3199 3228 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
3200 3229 DDI_FM_OK) {
3201 3230 rc = B_FALSE;
3202 3231 goto done;
3203 3232 }
3204 3233
3205 3234 /* Clear state bits prior to final interrupt enabling. */
3206 3235 atomic_and_32(&i40e->i40e_state,
3207 3236 ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));
3208 3237
3209 3238 i40e_intr_io_enable_all(i40e);
3210 3239
3211 3240 done:
3212 3241 if (rc == B_FALSE) {
3213 3242 i40e_stop(i40e);
3214 3243 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3215 3244 }
3216 3245
3217 3246 return (rc);
3218 3247 }
3219 3248
3220 3249 /*
3221 3250 * We may have loaned up descriptors to the stack. As such, if we still have
3222 3251 * them outstanding, then we will not continue with detach.
3223 3252 */
3224 3253 static boolean_t
3225 3254 i40e_drain_rx(i40e_t *i40e)
3226 3255 {
3227 3256 mutex_enter(&i40e->i40e_rx_pending_lock);
3228 3257 while (i40e->i40e_rx_pending > 0) {
3229 3258 if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
3230 3259 &i40e->i40e_rx_pending_lock,
3231 3260 drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
3232 3261 mutex_exit(&i40e->i40e_rx_pending_lock);
3233 3262 return (B_FALSE);
3234 3263 }
3235 3264 }
3236 3265 mutex_exit(&i40e->i40e_rx_pending_lock);
3237 3266
3238 3267 return (B_TRUE);
3239 3268 }
3240 3269
3241 3270 /*
3242 3271 * DDI UFM Callbacks
3243 3272 */
3244 3273 static int
3245 3274 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3246 3275 ddi_ufm_image_t *img)
3247 3276 {
3248 3277 if (imgno != 0)
3249 3278 return (EINVAL);
3250 3279
3251 3280 ddi_ufm_image_set_desc(img, "Firmware");
3252 3281 ddi_ufm_image_set_nslots(img, 1);
3253 3282
3254 3283 return (0);
3255 3284 }
3256 3285
3257 3286 static int
3258 3287 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3259 3288 uint_t slotno, ddi_ufm_slot_t *slot)
3260 3289 {
3261 3290 i40e_t *i40e = (i40e_t *)arg;
3262 3291 char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL;
3263 3292 nvlist_t *misc = NULL;
3264 3293 uint_t flags = DDI_PROP_DONTPASS;
3265 3294 int err;
3266 3295
3267 3296 if (imgno != 0 || slotno != 0 ||
3268 3297 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3269 3298 "firmware-version", &fw_ver) != DDI_PROP_SUCCESS ||
3270 3299 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3271 3300 "firmware-build", &fw_bld) != DDI_PROP_SUCCESS ||
3272 3301 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3273 3302 "api-version", &api_ver) != DDI_PROP_SUCCESS) {
3274 3303 err = EINVAL;
3275 3304 goto err;
3276 3305 }
3277 3306
3278 3307 ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
3279 3308 ddi_ufm_slot_set_version(slot, fw_ver);
3280 3309
3281 3310 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3282 3311 if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 ||
3283 3312 (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) {
3284 3313 goto err;
3285 3314 }
3286 3315 ddi_ufm_slot_set_misc(slot, misc);
3287 3316
3288 3317 ddi_prop_free(fw_ver);
3289 3318 ddi_prop_free(fw_bld);
3290 3319 ddi_prop_free(api_ver);
3291 3320
3292 3321 return (0);
3293 3322 err:
3294 3323 nvlist_free(misc);
3295 3324 if (fw_ver != NULL)
3296 3325 ddi_prop_free(fw_ver);
3297 3326 if (fw_bld != NULL)
3298 3327 ddi_prop_free(fw_bld);
3299 3328 if (api_ver != NULL)
3300 3329 ddi_prop_free(api_ver);
3301 3330
3302 3331 return (err);
3303 3332 }
3304 3333
3305 3334 static int
3306 3335 i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
3307 3336 {
3308 3337 *caps = DDI_UFM_CAP_REPORT;
3309 3338
3310 3339 return (0);
3311 3340 }
3312 3341
3313 3342 static ddi_ufm_ops_t i40e_ufm_ops = {
3314 3343 NULL,
3315 3344 i40e_ufm_fill_image,
3316 3345 i40e_ufm_fill_slot,
3317 3346 i40e_ufm_getcaps
3318 3347 };
3319 3348
3320 3349 static int
3321 3350 i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3322 3351 {
3323 3352 i40e_t *i40e;
3324 3353 struct i40e_osdep *osdep;
3325 3354 i40e_hw_t *hw;
3326 3355 int instance;
3327 3356
3328 3357 if (cmd != DDI_ATTACH)
3329 3358 return (DDI_FAILURE);
3330 3359
3331 3360 instance = ddi_get_instance(devinfo);
3332 3361 i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);
3333 3362
3334 3363 i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
3335 3364 i40e->i40e_instance = instance;
3336 3365 i40e->i40e_dip = devinfo;
3337 3366
3338 3367 hw = &i40e->i40e_hw_space;
3339 3368 osdep = &i40e->i40e_osdep_space;
3340 3369 hw->back = osdep;
3341 3370 osdep->ios_i40e = i40e;
3342 3371
3343 3372 ddi_set_driver_private(devinfo, i40e);
3344 3373
3345 3374 i40e_fm_init(i40e);
3346 3375 i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;
3347 3376
3348 3377 if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
3349 3378 i40e_error(i40e, "Failed to map PCI configurations.");
3350 3379 goto attach_fail;
3351 3380 }
3352 3381 i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;
3353 3382
3354 3383 i40e_identify_hardware(i40e);
3355 3384
3356 3385 if (!i40e_regs_map(i40e)) {
3357 3386 i40e_error(i40e, "Failed to map device registers.");
3358 3387 goto attach_fail;
3359 3388 }
3360 3389 i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;
3361 3390
3362 3391 i40e_init_properties(i40e);
3363 3392 i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;
3364 3393
3365 3394 if (!i40e_common_code_init(i40e, hw))
3366 3395 goto attach_fail;
3367 3396 i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;
3368 3397
3369 3398 /*
3370 3399 * When we participate in IRM, we should make sure that we register
3371 3400 * ourselves with it before callbacks.
3372 3401 */
3373 3402 if (!i40e_alloc_intrs(i40e, devinfo)) {
3374 3403 i40e_error(i40e, "Failed to allocate interrupts.");
3375 3404 goto attach_fail;
3376 3405 }
3377 3406 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;
3378 3407
3379 3408 if (!i40e_alloc_trqpairs(i40e)) {
3380 3409 i40e_error(i40e,
3381 3410 "Failed to allocate receive & transmit rings.");
3382 3411 goto attach_fail;
3383 3412 }
3384 3413 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;
3385 3414
3386 3415 if (!i40e_map_intrs_to_vectors(i40e)) {
3387 3416 i40e_error(i40e, "Failed to map interrupts to vectors.");
3388 3417 goto attach_fail;
3389 3418 }
3390 3419
3391 3420 if (!i40e_add_intr_handlers(i40e)) {
3392 3421 i40e_error(i40e, "Failed to add the interrupt handlers.");
3393 3422 goto attach_fail;
3394 3423 }
3395 3424 i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;
3396 3425
3397 3426 if (!i40e_final_init(i40e)) {
3398 3427 i40e_error(i40e, "Final initialization failed.");
3399 3428 goto attach_fail;
3400 3429 }
3401 3430 i40e->i40e_attach_progress |= I40E_ATTACH_INIT;
3402 3431
3403 3432 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
3404 3433 DDI_FM_OK) {
3405 3434 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3406 3435 goto attach_fail;
3407 3436 }
3408 3437
3409 3438 if (!i40e_stats_init(i40e)) {
3410 3439 i40e_error(i40e, "Stats initialization failed.");
3411 3440 goto attach_fail;
3412 3441 }
3413 3442 i40e->i40e_attach_progress |= I40E_ATTACH_STATS;
3414 3443
3415 3444 if (!i40e_register_mac(i40e)) {
3416 3445 i40e_error(i40e, "Failed to register to MAC/GLDv3");
3417 3446 goto attach_fail;
3418 3447 }
3419 3448 i40e->i40e_attach_progress |= I40E_ATTACH_MAC;
3420 3449
3421 3450 i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
3422 3451 I40E_CYCLIC_PERIOD, DDI_IPL_0);
3423 3452 if (i40e->i40e_periodic_id == 0) {
3424 3453 i40e_error(i40e, "Failed to add the link-check timer");
3425 3454 goto attach_fail;
3426 3455 }
3427 3456 i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;
3428 3457
3429 3458 if (!i40e_enable_interrupts(i40e)) {
3430 3459 i40e_error(i40e, "Failed to enable DDI interrupts");
3431 3460 goto attach_fail;
3432 3461 }
3433 3462 i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;
3434 3463
3435 3464 if (i40e->i40e_hw_space.bus.func == 0) {
3436 3465 if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION,
3437 3466 &i40e_ufm_ops, &i40e->i40e_ufmh, i40e) != 0) {
3438 3467 i40e_error(i40e, "failed to initialize UFM subsystem");
3439 3468 goto attach_fail;
3440 3469 }
3441 3470 ddi_ufm_update(i40e->i40e_ufmh);
3442 3471 i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT;
3443 3472 }
3444 3473
3445 3474 atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);
3446 3475
3447 3476 mutex_enter(&i40e_glock);
3448 3477 list_insert_tail(&i40e_glist, i40e);
3449 3478 mutex_exit(&i40e_glock);
3450 3479
3451 3480 return (DDI_SUCCESS);
3452 3481
3453 3482 attach_fail:
3454 3483 i40e_unconfigure(devinfo, i40e);
3455 3484 return (DDI_FAILURE);
3456 3485 }
3457 3486
/*
 * detach(9E) entry point.
 *
 * Only a full DDI_DETACH is supported (DDI_SUSPEND is rejected).  The
 * detach is refused while receive DMA buffers are still outstanding:
 * i40e_drain_rx() waits for loaned rx buffers to be returned, and if any
 * remain (i40e_rx_pending != 0) we must not tear the instance down —
 * presumably the networking stack still references that DMA memory.
 */
static int
i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	i40e_t *i40e;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	i40e = (i40e_t *)ddi_get_driver_private(devinfo);
	if (i40e == NULL) {
		i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
		return (DDI_FAILURE);
	}

	/* Fail the detach if rx DMA buffers never drained. */
	if (i40e_drain_rx(i40e) == B_FALSE) {
		i40e_log(i40e, "timed out draining DMA resources, %d buffers "
		    "remain", i40e->i40e_rx_pending);
		return (DDI_FAILURE);
	}

	/*
	 * Remove ourselves from the global instance list before teardown so
	 * no other consumer of i40e_glist can find a half-destroyed i40e_t.
	 */
	mutex_enter(&i40e_glock);
	list_remove(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	/* Undo everything i40e_attach() set up, keyed off attach_progress. */
	i40e_unconfigure(devinfo, i40e);

	return (DDI_SUCCESS);
}
3486 3515
/*
 * Character/block entry points.  i40e is a pure GLDv3 driver, so every
 * entry here is a stub; mac_init_ops() (called from _init()) is expected
 * to take over the relevant entry points on the framework's behalf.
 * NOTE(review): initializers are positional and must stay in the exact
 * order of struct cb_ops in <sys/devops.h>.
 */
static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3507 3536
/*
 * Device operations vector handed to mod_install() via i40e_modldrv.
 * Only attach/detach are implemented; quiesce is explicitly unsupported
 * (ddi_quiesce_not_supported), so fast reboot must reset the device by
 * other means.  Positional initializers — order must match struct dev_ops.
 */
static struct dev_ops i40e_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i40e_attach,		/* devo_attach */
	i40e_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&i40e_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nulldev,		/* devo_power */
	ddi_quiesce_not_supported	/* devo_quiesce */
};
3522 3551
/*
 * Driver module linkage: ties the ident string and the dev_ops vector
 * together for the module framework.
 */
static struct modldrv i40e_modldrv = {
	&mod_driverops,		/* drv_modops */
	i40e_ident,		/* drv_linkinfo */
	&i40e_dev_ops		/* drv_dev_ops */
};
3528 3557
/*
 * Module linkage passed to mod_install()/mod_remove(); the NULL terminates
 * the linkage list.
 */
static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	&i40e_modldrv,
	NULL
};
3534 3563
3535 3564 /*
3536 3565 * Module Initialization Functions.
3537 3566 */
3538 3567 int
3539 3568 _init(void)
3540 3569 {
3541 3570 int status;
3542 3571
3543 3572 list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
3544 3573 list_create(&i40e_dlist, sizeof (i40e_device_t),
3545 3574 offsetof(i40e_device_t, id_link));
3546 3575 mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
3547 3576 mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);
3548 3577
3549 3578 status = mod_install(&i40e_modlinkage);
3550 3579 if (status != DDI_SUCCESS) {
3551 3580 mac_fini_ops(&i40e_dev_ops);
3552 3581 mutex_destroy(&i40e_glock);
3553 3582 list_destroy(&i40e_dlist);
3554 3583 list_destroy(&i40e_glist);
3555 3584 }
3556 3585
3557 3586 return (status);
3558 3587 }
3559 3588
/*
 * _info(9E): report module information via the standard mod_info() helper.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}
3565 3594
3566 3595 int
3567 3596 _fini(void)
3568 3597 {
3569 3598 int status;
3570 3599
3571 3600 status = mod_remove(&i40e_modlinkage);
3572 3601 if (status == DDI_SUCCESS) {
3573 3602 mac_fini_ops(&i40e_dev_ops);
3574 3603 mutex_destroy(&i40e_glock);
3575 3604 list_destroy(&i40e_dlist);
3576 3605 list_destroy(&i40e_glist);
3577 3606 }
3578 3607
3579 3608 return (status);
3580 3609 }
|
↓ open down ↓ |
1284 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX