/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

#ifndef	_SYS_USB_XHCI_XHCI_H
#define	_SYS_USB_XHCI_XHCI_H

/*
 * Extensible Host Controller Interface (xHCI) USB Driver
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq_impl.h>
#include <sys/sysmacros.h>
#include <sys/usb/hcd/xhci/xhcireg.h>

#include <sys/usb/usba.h>
#include <sys/usb/usba/hcdi.h>
#include <sys/usb/hubd/hub.h>
#include <sys/usb/usba/hubdi.h>
#include <sys/usb/hubd/hubdvar.h>


#ifdef __cplusplus
extern "C" {
#endif

/*
 * The base segment for DMA attributes was determined to be 4k based on xHCI 1.1
 * / table 54: Data Structure Max Size, Boundary, and Alignment Requirement
 * Summary. This indicates that the required alignment for most things is
 * PAGESIZE, which in our current implementation is required to be 4K. We
 * provide the ring segment value below for the things which need 64K
 * alignment.
 *
 * Similarly, in the same table, the maximum required alignment is 64 bytes,
 * hence we use that for everything.
 *
 * Next are the scatter/gather lengths. For most of the data structures, we only
 * want to have a single SGL entry, i.e. just a simple flat mapping. For many of
 * our transfers, we use the same logic to simplify the implementation of the
 * driver. However, for bulk transfers, which are the largest by far, we want to
 * be able to leverage SGLs to give us more DMA flexibility.
 *
 * We can transfer up to 64K in one transfer request block (TRB), which
 * corresponds to a single SGL entry. Each ring we create is a single page in
 * size and will support at most 256 TRBs. To try and give the operating system
 * flexibility when allocating DMA transfers, we've opted to allow up to 63
 * SGLs. Because there isn't a good way to support DMA windows with the xHCI
 * controller design, if this number is too small then DMA allocations and
 * binding might fail. If the DMA binding fails, the transfer will fail.
 *
 * The reason that we use 63 SGLs and not the expected 64 is that we always need
 * to allocate an additional TRB for the event data. This leaves us with a
 * nicely divisible number of entries.
 *
 * The final piece of this is the maximum-sized transfer that the driver
 * advertises to the broader framework. This is currently sized at 512 KiB. For
 * reference, the ehci driver sized this value at 640 KiB. It's important to
 * understand that this isn't reflected in the DMA attribute limitation, because
 * it's not an attribute of the hardware. Experimentally, this has proven to be
 * sufficient for most of the drivers that we support today. When considering
 * increasing this number, please note the impact that might have on the number
 * of DMA SGL entries required to satisfy the allocation.
 *
 * The value of 512 KiB was originally based on the number of SGLs we supported
 * multiplied by the maximum transfer size. The original value of
 * XHCI_TRANSFER_DMA_SGL was 8. The 512 KiB value was based upon taking the
 * number of SGLs and assuming that each TRB used its maximum transfer size of
 * 64 KiB.
 */
#define	XHCI_TRB_MAX_TRANSFER	65536	/* 64 KiB */
#define	XHCI_DMA_ALIGN		64
#define	XHCI_DEF_DMA_SGL	1
#define	XHCI_TRANSFER_DMA_SGL	63
#define	XHCI_MAX_TRANSFER	524288	/* 512 KiB */
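
/*
 * To make the sizing above concrete: a transfer that uses all 63 data SGLs
 * plus its event data TRB occupies 64 TRBs, one quarter of a 256-TRB ring,
 * and the historical XHCI_MAX_TRANSFER value is simply:
 *
 *	8 (original XHCI_TRANSFER_DMA_SGL) * 65536 (XHCI_TRB_MAX_TRANSFER)
 *	    = 524288 bytes (XHCI_MAX_TRANSFER)
 */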

/*
 * Properties and values for rerouting ehci ports to xhci.
 */
#define	XHCI_PROP_REROUTE_DISABLE	0
#define	XHCI_PROP_REROUTE_DEFAULT	1

/*
 * This number is a bit made up. Truthfully, the API here isn't the most useful
 * for what we need to define, as it should really be based on the endpoint
 * we're interested in rather than the device as a whole.
 *
 * We're basically being asked how many TRBs we're willing to schedule in one
 * go. There's no great way to come up with this number, so we basically are
 * making up something such that we use up a good portion of a ring, but not too
 * much of it.
 */
#define	XHCI_ISOC_MAX_TRB	64

#ifdef DEBUG
#define	XHCI_DMA_SYNC(dma, flag)	VERIFY0(ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#else
#define	XHCI_DMA_SYNC(dma, flag)	((void) ddi_dma_sync( \
					    (dma).xdb_dma_handle, 0, 0, \
					    (flag)))
#endif
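
/*
 * A minimal usage sketch (illustrative, not taken from the driver): after the
 * controller has written into a transfer's buffer, sync it for the kernel
 * before reading it; before handing data to the controller, sync it for the
 * device:
 *
 *	XHCI_DMA_SYNC(xt->xt_buffer, DDI_DMA_SYNC_FORKERNEL);
 *	...
 *	XHCI_DMA_SYNC(xt->xt_buffer, DDI_DMA_SYNC_FORDEV);
 */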

/*
 * TRBs need to indicate the number of remaining USB packets in the overall
 * transfer. This is a 5-bit value, which means that the maximum value we can
 * store in that TRB field is 31.
 */
#define	XHCI_MAX_TDSIZE		31
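
/*
 * For example (a sketch of the clamping implied above, with hypothetical
 * variable names), a transfer with 'remaining' packets left would advertise:
 *
 *	uint_t tdsize = MIN(remaining, XHCI_MAX_TDSIZE);
 */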

/*
 * This defines a time in 2-ms ticks that is required to wait for the controller
 * to be ready to go. Section 5.4.8 of the XHCI specification in the description
 * of the PORTSC register indicates that the upper bound is 20 ms. Therefore the
 * number of ticks is 10.
 */
#define	XHCI_POWER_GOOD	10

/*
 * Definitions to determine the default number of interrupts. Note that we only
 * bother with a single interrupt at this time, though we've arranged the driver
 * to make it possible to request more if, for some unlikely reason, it becomes
 * necessary.
 */
#define	XHCI_NINTR	1

/*
 * Default interrupt modulation value. This enables us to have 4000 interrupts /
 * second. This is supposed to be the default value of the controller. See xHCI
 * 1.1 / 4.17.2 for more information.
 */
#define	XHCI_IMOD_DEFAULT	0x000003F8U
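
/*
 * To see where the 4000 interrupts / second figure comes from: the moderation
 * interval is expressed in 250 ns units, so 0x3F8 (1016 decimal) * 250 ns is
 * roughly 254 us between interrupts, or about 3900-4000 interrupts per second.
 */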

/*
 * Definitions that surround the default values used in various contexts. These
 * come from various parts of the xHCI specification. In general, see xHCI 1.1 /
 * 4.8.2. Note that the MPS_MASK is used for ISOCH and INTR endpoints which have
 * different sizes.
 *
 * The burst member is a bit more complicated. By default for USB 2 devices, it
 * only matters for ISOCH and INTR endpoints and so we use the macros below to
 * pull it out of the endpoint descriptor's max packet field. For USB 3, it
 * matters for non-control endpoints. However, it comes out of a companion
 * descriptor.
 *
 * By default the mult member is zero for all cases except for super speed
 * ISOCH endpoints, where it comes from the companion descriptor.
 */
#define	XHCI_CONTEXT_DEF_CERR		3
#define	XHCI_CONTEXT_ISOCH_CERR		0
#define	XHCI_CONTEXT_MPS_MASK		0x07ff
#define	XHCI_CONTEXT_BURST_MASK		0x1800
#define	XHCI_CONTEXT_BURST_SHIFT	11
#define	XHCI_CONTEXT_DEF_MULT		0
#define	XHCI_CONTEXT_DEF_MAX_ESIT	0
#define	XHCI_CONTEXT_DEF_CTRL_ATL	8
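
/*
 * As a sketch of how the masks above are meant to be applied to a USB 2
 * endpoint descriptor's max packet field (hypothetical variable names):
 *
 *	uint16_t wmps = ep->wMaxPacketSize;
 *	uint_t mps = wmps & XHCI_CONTEXT_MPS_MASK;
 *	uint_t burst = (wmps & XHCI_CONTEXT_BURST_MASK) >>
 *	    XHCI_CONTEXT_BURST_SHIFT;
 */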

/*
 * This number represents the number of transfers that we'll set up for a given
 * interrupt transfer. The idea is to allocate enough transfers that one is
 * always available, even if the system is a bit caught up in processing them
 * and for some reason we can't fire the interrupt. In other words, we want
 * enough transfers queued to ride out a full millisecond at the fastest polling
 * interval (125 us), so we choose 8, with the assumption that we should be able
 * to process at least one in a given millisecond. Note that this is not based
 * on hard data and is really just as much a guess as a hope.
 *
 * While we could use fewer resources for other, slower interrupt transfers,
 * starting with uniform resource usage makes things a bit easier.
 */
#define	XHCI_INTR_IN_NTRANSFERS	8

/*
 * This number represents the number of xhci_transfer_t structures that we'll
 * set up for a given isochronous transfer polling request. A given isochronous
 * transfer may actually have multiple units of time associated with it. As
 * such, we basically want to treat this like a case of classic double
 * buffering. We have one ready to go while the other is being filled up. This
 * will compensate for additional latency in the system. This is smaller than
 * the Interrupt IN transfer case above as many callers may ask for multiple
 * intervals in a single request.
 */
#define	XHCI_ISOC_IN_NTRANSFERS	2

#define	XHCI_PERIODIC_IN_NTRANSFERS	\
	MAX(XHCI_ISOC_IN_NTRANSFERS, XHCI_INTR_IN_NTRANSFERS)

/*
 * Mask for a route string which is a 20-bit value.
 */
#define	XHCI_ROUTE_MASK(x)	((x) & 0xfffff)
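
/*
 * For example (per USB 3.x section 8.9, which defines the route string as one
 * 4-bit port number per hub tier, least-significant nibble closest to the root
 * port): a device on port 2 of a hub that itself sits on port 3 of the hub
 * attached to the root port would have a route string of 0x00023, and
 * XHCI_ROUTE_MASK() simply keeps those low 20 bits.
 */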

/*
 * This is the default tick that we use for timeouts while endpoints have
 * outstanding, active, non-periodic transfers. We choose one second as the USBA
 * specifies timeouts in units of seconds. Note that this is in microseconds, so
 * it can be fed into drv_usectohz().
 */
#define	XHCI_TICK_TIMEOUT_US	(MICROSEC)
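
/*
 * A sketch of the intended use (illustrative; the callback name and argument
 * here are hypothetical, not the driver's actual ones):
 *
 *	xep->xep_timeout = timeout(xhci_endpoint_timeout_cb, xep,
 *	    drv_usectohz(XHCI_TICK_TIMEOUT_US));
 */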

/*
 * Set of bits, any one of which indicates that this port has something
 * interesting on it.
 */
#define	XHCI_HUB_INTR_CHANGE_MASK	(XHCI_PS_CSC | XHCI_PS_PEC | \
	XHCI_PS_WRC | XHCI_PS_OCC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC)
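
/*
 * A sketch of how this mask is meant to be consumed (hypothetical variable
 * name): after reading a port's PORTSC register,
 *
 *	if ((portsc & XHCI_HUB_INTR_CHANGE_MASK) != 0)
 *		... notify the hub driver of a change on this port ...
 */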

/*
 * These represent known issues with various xHCI controllers.
 *
 * XHCI_QUIRK_NO_MSI	MSI support on this controller is known to be
 *			broken.
 *
 * XHCI_QUIRK_32_ONLY	Only use 32-bit DMA addresses with this
 *			controller.
 *
 * XHCI_QUIRK_INTC_EHCI	This is an Intel platform which supports
 *			rerouting ports between EHCI and xHCI
 *			controllers on the platform.
 */
typedef enum xhci_quirk {
	XHCI_QUIRK_NO_MSI	= 0x01,
	XHCI_QUIRK_32_ONLY	= 0x02,
	XHCI_QUIRK_INTC_EHCI	= 0x04
} xhci_quirk_t;
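
/*
 * Since the quirks are bit flags, they are tested and combined with bitwise
 * operators, e.g. (illustrative):
 *
 *	if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY)
 *		... restrict the DMA attributes to 32-bit addresses ...
 */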

/*
 * xHCI capability parameter flags. These are documented in xHCI 1.1 / 5.3.6.
 */
typedef enum xhci_cap_flags {
	XCAP_AC64	= 0x001,
	XCAP_BNC	= 0x002,
	XCAP_CSZ	= 0x004,
	XCAP_PPC	= 0x008,
	XCAP_PIND	= 0x010,
	XCAP_LHRC	= 0x020,
	XCAP_LTC	= 0x040,
	XCAP_NSS	= 0x080,
	XCAP_PAE	= 0x100,
	XCAP_SPC	= 0x200,
	XCAP_SEC	= 0x400,
	XCAP_CFC	= 0x800
} xchi_cap_flags_t;

/*
 * Second set of capabilities; these are documented in xHCI 1.1 / 5.3.9.
 */
typedef enum xhci_cap2_flags {
	XCAP2_U3C	= 0x01,
	XCAP2_CMC	= 0x02,
	XCAP2_FMC	= 0x04,
	XCAP2_CTC	= 0x08,
	XCAP2_LEC	= 0x10,
	XCAP2_CIC	= 0x20
} xhci_cap2_flags_t;

/*
 * These represent and store the various capability registers that we'll need to
 * use. In addition, we stash a few other versioning-related bits here. Note
 * that we cache more information than we might need so that we have it for
 * debugging purposes.
 */
typedef struct xhci_capability {
	uint8_t			xcap_usb_vers;
	uint16_t		xcap_hci_vers;
	uint32_t		xcap_pagesize;
	uint8_t			xcap_max_slots;
	uint16_t		xcap_max_intrs;
	uint8_t			xcap_max_ports;
	boolean_t		xcap_ist_micro;
	uint8_t			xcap_ist;
	uint16_t		xcap_max_esrt;
	boolean_t		xcap_scratch_restore;
	uint16_t		xcap_max_scratch;
	uint8_t			xcap_u1_lat;
	uint16_t		xcap_u2_lat;
	xchi_cap_flags_t	xcap_flags;
	uint8_t			xcap_max_psa;
	uint16_t		xcap_xecp_off;
	xhci_cap2_flags_t	xcap_flags2;
	int			xcap_intr_types;
} xhci_capability_t;

/*
 * This represents a single logical DMA allocation. For the vast majority of
 * non-transfer cases, it only represents a single DMA buffer and not a
 * scatter-gather list.
 */
typedef struct xhci_dma_buffer {
	caddr_t			xdb_va;		/* Buffer VA */
	size_t			xdb_len;	/* Buffer logical len */
	ddi_acc_handle_t	xdb_acc_handle;	/* Access handle */
	ddi_dma_handle_t	xdb_dma_handle;	/* DMA handle */
	int			xdb_ncookies;	/* Number of actual cookies */
	ddi_dma_cookie_t	xdb_cookies[XHCI_TRANSFER_DMA_SGL];
} xhci_dma_buffer_t;
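
/*
 * When a buffer does have multiple cookies (transfer buffers bound with the
 * transfer DMA attributes), the physical ranges can be walked as in this
 * sketch (illustrative only):
 *
 *	for (int i = 0; i < xdb->xdb_ncookies; i++) {
 *		uint64_t pa = xdb->xdb_cookies[i].dmac_laddress;
 *		size_t len = xdb->xdb_cookies[i].dmac_size;
 *		...
 *	}
 */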

/*
 * This is a single transfer request block (TRB). It's packed to match the
 * hardware layout.
 */
#pragma pack(1)
typedef struct xhci_trb {
	uint64_t	trb_addr;
	uint32_t	trb_status;
	uint32_t	trb_flags;
} xhci_trb_t;
#pragma pack()

/*
 * This represents a single transfer that we want to allocate and perform.
 */
typedef struct xhci_transfer {
	list_node_t		xt_link;
	hrtime_t		xt_sched_time;
	xhci_dma_buffer_t	xt_buffer;
	uint_t			xt_ntrbs;
	uint_t			xt_short;
	uint_t			xt_timeout;
	usb_cr_t		xt_cr;
	boolean_t		xt_data_tohost;
	xhci_trb_t		*xt_trbs;
	uint64_t		*xt_trbs_pa;
	usb_isoc_pkt_descr_t	*xt_isoc;
	usb_opaque_t		xt_usba_req;
} xhci_transfer_t;

/*
 * This represents a ring in xHCI, upon which event, transfer, and command TRBs
 * are scheduled.
 */
typedef struct xhci_ring {
	xhci_dma_buffer_t	xr_dma;
	uint_t			xr_ntrb;
	xhci_trb_t		*xr_trb;
	uint_t			xr_head;
	uint_t			xr_tail;
	uint8_t			xr_cycle;
} xhci_ring_t;

/*
 * This structure is used to represent the xHCI Device Context Base Address
 * Array. It's defined in section 6.1 of the specification and is required for
 * the controller to start.
 *
 * The maximum number of slots supported is always 256; therefore we size this
 * structure at its maximum.
 */
#define	XHCI_MAX_SLOTS	256
#define	XHCI_DCBAA_SCRATCHPAD_INDEX	0

typedef struct xhci_dcbaa {
	uint64_t		*xdc_base_addrs;
	xhci_dma_buffer_t	xdc_dma;
} xhci_dcbaa_t;

typedef struct xhci_scratchpad {
	uint64_t		*xsp_addrs;
	xhci_dma_buffer_t	xsp_addr_dma;
	xhci_dma_buffer_t	*xsp_scratch_dma;
} xhci_scratchpad_t;

/*
 * Contexts. These structures are inserted into the DCBAA above and are used for
 * describing the state of the system. Note that while many of these are
 * 32 bytes in size, the xHCI specification defines that they'll be extended to
 * 64 bytes, with all the extra bytes as zeros, if the CSZ flag is set in the
 * HCCPARAMS1 register, i.e. we have the flag XCAP_CSZ set.
 *
 * The device context covers the slot context and 31 endpoints.
 */
#define	XHCI_DEVICE_CONTEXT_32	1024
#define	XHCI_DEVICE_CONTEXT_64	2048
#define	XHCI_NUM_ENDPOINTS	31
#define	XHCI_DEFAULT_ENDPOINT	0
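
/*
 * The sizes above follow from the layout: a device context is one slot context
 * plus XHCI_NUM_ENDPOINTS (31) endpoint contexts, i.e. 32 contexts total, so
 *
 *	32 * 32 bytes = 1024 (XHCI_DEVICE_CONTEXT_32)
 *	32 * 64 bytes = 2048 (XHCI_DEVICE_CONTEXT_64)
 */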

#pragma pack(1)
typedef struct xhci_slot_context {
	uint32_t	xsc_info;
	uint32_t	xsc_info2;
	uint32_t	xsc_tt;
	uint32_t	xsc_state;
	uint32_t	xsc_reserved[4];
} xhci_slot_context_t;

typedef struct xhci_endpoint_context {
	uint32_t	xec_info;
	uint32_t	xec_info2;
	uint64_t	xec_dequeue;
	uint32_t	xec_txinfo;
	uint32_t	xec_reserved[3];
} xhci_endpoint_context_t;

typedef struct xhci_input_context {
	uint32_t	xic_drop_flags;
	uint32_t	xic_add_flags;
	uint32_t	xic_reserved[6];
} xhci_input_context_t;
#pragma pack()

/*
 * Definitions and structures for maintaining the event ring.
 */
#define	XHCI_EVENT_NSEGS	1

#pragma pack(1)
typedef struct xhci_event_segment {
	uint64_t	xes_addr;
	uint16_t	xes_size;
	uint16_t	xes_rsvd0;
	uint32_t	xes_rsvd1;
} xhci_event_segment_t;
#pragma pack()

typedef struct xhci_event_ring {
	xhci_event_segment_t	*xev_segs;
	xhci_dma_buffer_t	xev_dma;
	xhci_ring_t		xev_ring;
} xhci_event_ring_t;

typedef enum xhci_command_ring_state {
	XHCI_COMMAND_RING_IDLE		= 0x00,
	XHCI_COMMAND_RING_RUNNING	= 0x01,
	XHCI_COMMAND_RING_ABORTING	= 0x02,
	XHCI_COMMAND_RING_ABORT_DONE	= 0x03
} xhci_command_ring_state_t;

typedef struct xhci_command_ring {
	xhci_ring_t			xcr_ring;
	kmutex_t			xcr_lock;
	kcondvar_t			xcr_cv;
	list_t				xcr_commands;
	timeout_id_t			xcr_timeout;
	xhci_command_ring_state_t	xcr_state;
} xhci_command_ring_t;

/*
 * Individual command states.
 *
 * XHCI_COMMAND_S_INIT		The command has yet to be inserted into the
 *				command ring.
 *
 * XHCI_COMMAND_S_QUEUED	The command is queued in the command ring.
 *
 * XHCI_COMMAND_S_RECEIVED	A command completion for this was received.
 *
 * XHCI_COMMAND_S_DONE		The command has been executed. Note that it may
 *				have been aborted.
 *
 * XHCI_COMMAND_S_RESET		The ring is being reset due to a fatal error and
 *				this command has been removed from the ring.
 *				This means it has been aborted, but it was not
 *				the cause of the abort.
 *
 * Note, when adding states: any state after XHCI_COMMAND_S_DONE implies that,
 * upon reaching it, the command is no longer in the ring.
 */
typedef enum xhci_command_state {
	XHCI_COMMAND_S_INIT	= 0x00,
	XHCI_COMMAND_S_QUEUED	= 0x01,
	XHCI_COMMAND_S_RECEIVED	= 0x02,
	XHCI_COMMAND_S_DONE	= 0x03,
	XHCI_COMMAND_S_RESET	= 0x04
} xhci_command_state_t;

/*
 * The TRB contents here are always kept in host byte order and are transformed
 * to little endian when actually scheduled on the ring.
 */
typedef struct xhci_command {
	list_node_t		xco_link;
	kcondvar_t		xco_cv;
	xhci_trb_t		xco_req;
	xhci_trb_t		xco_res;
	xhci_command_state_t	xco_state;
} xhci_command_t;
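
/*
 * A sketch of that host-order to little-endian conversion as a TRB is copied
 * onto a ring (illustrative; the driver's real ring fill logic also manages
 * the cycle bit):
 *
 *	ring_trb->trb_addr = LE_64(xco->xco_req.trb_addr);
 *	ring_trb->trb_status = LE_32(xco->xco_req.trb_status);
 *	ring_trb->trb_flags = LE_32(xco->xco_req.trb_flags);
 */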

typedef enum xhci_endpoint_state {
	XHCI_ENDPOINT_PERIODIC		= 0x01,
	XHCI_ENDPOINT_HALTED		= 0x02,
	XHCI_ENDPOINT_QUIESCE		= 0x04,
	XHCI_ENDPOINT_TIMED_OUT		= 0x08,
	/*
	 * This is a composite of states that we need to watch for. We don't
	 * want to allow ourselves to set one of these flags while one of them
	 * is currently active.
	 */
	XHCI_ENDPOINT_SERIALIZE		= 0x0c,
	/*
	 * This is a composite of states; if any of them is set, we must not
	 * schedule activity on the ring.
	 */
	XHCI_ENDPOINT_DONT_SCHEDULE	= 0x0e,
	/*
	 * This endpoint is being torn down and should make sure it
	 * de-schedules itself.
	 */
	XHCI_ENDPOINT_TEARDOWN		= 0x10
} xhci_endpoint_state_t;
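
/*
 * For reference, the composite values above decompose as:
 *
 *	XHCI_ENDPOINT_SERIALIZE     == QUIESCE | TIMED_OUT            (0x0c)
 *	XHCI_ENDPOINT_DONT_SCHEDULE == HALTED | QUIESCE | TIMED_OUT   (0x0e)
 */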

/*
 * Forward declarations required for the endpoint.
 */
struct xhci_device;
struct xhci;

typedef struct xhci_endpoint {
	struct xhci		*xep_xhci;
	struct xhci_device	*xep_xd;
	uint_t			xep_num;
	uint_t			xep_type;
	xhci_endpoint_state_t	xep_state;
	kcondvar_t		xep_state_cv;
	timeout_id_t		xep_timeout;
	list_t			xep_transfers;
	usba_pipe_handle_data_t	*xep_pipe;
	xhci_ring_t		xep_ring;
} xhci_endpoint_t;

typedef struct xhci_device {
	list_node_t		xd_link;
	usb_port_t		xd_port;
	uint8_t			xd_slot;
	boolean_t		xd_addressed;
	usba_device_t		*xd_usbdev;
	xhci_dma_buffer_t	xd_ictx;
	kmutex_t		xd_imtx;	/* Protects input contexts */
	xhci_input_context_t	*xd_input;
	xhci_slot_context_t	*xd_slotin;
	xhci_endpoint_context_t	*xd_endin[XHCI_NUM_ENDPOINTS];
	xhci_dma_buffer_t	xd_octx;
	xhci_slot_context_t	*xd_slotout;
	xhci_endpoint_context_t	*xd_endout[XHCI_NUM_ENDPOINTS];
	xhci_endpoint_t		*xd_endpoints[XHCI_NUM_ENDPOINTS];
} xhci_device_t;

typedef enum xhci_periodic_state {
	XHCI_PERIODIC_POLL_IDLE	= 0x0,
	XHCI_PERIODIC_POLL_ACTIVE,
	XHCI_PERIODIC_POLL_NOMEM,
	XHCI_PERIODIC_POLL_STOPPING
} xhci_periodic_state_t;

typedef struct xhci_periodic_pipe {
	xhci_periodic_state_t	xpp_poll_state;
	usb_opaque_t		xpp_usb_req;
	size_t			xpp_tsize;
	uint_t			xpp_ntransfers;
	xhci_transfer_t		*xpp_transfers[XHCI_PERIODIC_IN_NTRANSFERS];
} xhci_periodic_pipe_t;

typedef struct xhci_pipe {
	list_node_t		xp_link;
	hrtime_t		xp_opentime;
	usba_pipe_handle_data_t	*xp_pipe;
	xhci_endpoint_t		*xp_ep;
	xhci_periodic_pipe_t	xp_periodic;
} xhci_pipe_t;

typedef struct xhci_usba {
	usba_hcdi_ops_t		*xa_ops;
	ddi_dma_attr_t		xa_dma_attr;
	usb_dev_descr_t		xa_dev_descr;
	usb_ss_hub_descr_t	xa_hub_descr;
	usba_pipe_handle_data_t	*xa_intr_cb_ph;
	usb_intr_req_t		*xa_intr_cb_req;
	list_t			xa_devices;
	list_t			xa_pipes;
} xhci_usba_t;

typedef enum xhci_attach_seq {
	XHCI_ATTACH_FM		= 0x1 << 0,
	XHCI_ATTACH_PCI_CONFIG	= 0x1 << 1,
	XHCI_ATTACH_REGS_MAP	= 0x1 << 2,
	XHCI_ATTACH_INTR_ALLOC	= 0x1 << 3,
	XHCI_ATTACH_INTR_ADD	= 0x1 << 4,
	XHCI_ATTACH_SYNCH	= 0x1 << 5,
	XHCI_ATTACH_INTR_ENABLE	= 0x1 << 6,
	XHCI_ATTACH_STARTED	= 0x1 << 7,
	XHCI_ATTACH_USBA	= 0x1 << 8,
	XHCI_ATTACH_ROOT_HUB	= 0x1 << 9
} xhci_attach_seq_t;

typedef enum xhci_state_flags {
	XHCI_S_ERROR		= 0x1 << 0
} xhci_state_flags_t;

typedef struct xhci {
	dev_info_t		*xhci_dip;
	xhci_attach_seq_t	xhci_seq;
	int			xhci_fm_caps;
	ddi_acc_handle_t	xhci_cfg_handle;
	uint16_t		xhci_vendor_id;
	uint16_t		xhci_device_id;
	caddr_t			xhci_regs_base;
	ddi_acc_handle_t	xhci_regs_handle;
	uint_t			xhci_regs_capoff;
	uint_t			xhci_regs_operoff;
	uint_t			xhci_regs_runoff;
	uint_t			xhci_regs_dooroff;
	xhci_capability_t	xhci_caps;
	xhci_quirk_t		xhci_quirks;
	ddi_intr_handle_t	xhci_intr_hdl;
	int			xhci_intr_num;
	int			xhci_intr_type;
	uint_t			xhci_intr_pri;
	int			xhci_intr_caps;
	xhci_dcbaa_t		xhci_dcbaa;
	xhci_scratchpad_t	xhci_scratchpad;
	xhci_command_ring_t	xhci_command;
	xhci_event_ring_t	xhci_event;
	taskq_ent_t		xhci_tqe;
	kmutex_t		xhci_lock;
	kcondvar_t		xhci_statecv;
	xhci_state_flags_t	xhci_state;
	xhci_usba_t		xhci_usba;
} xhci_t;

/*
 * The xHCI memory-mapped registers come in four different categories, and the
 * offset to each set is variable. These values identify the register set that
 * a given access is after.
 */
typedef enum xhci_reg_type {
	XHCI_R_CAP,
	XHCI_R_OPER,
	XHCI_R_RUN,
	XHCI_R_DOOR
} xhci_reg_type_t;

/*
 * Quirks related functions
 */
extern void xhci_quirks_populate(xhci_t *);
extern void xhci_reroute_intel(xhci_t *);

/*
 * Interrupt related functions
 */
extern uint_t xhci_intr(caddr_t, caddr_t);
extern boolean_t xhci_ddi_intr_disable(xhci_t *);
extern boolean_t xhci_ddi_intr_enable(xhci_t *);
extern int xhci_intr_conf(xhci_t *);

/*
 * DMA related functions
 */
extern int xhci_check_dma_handle(xhci_t *, xhci_dma_buffer_t *);
extern void xhci_dma_acc_attr(xhci_t *, ddi_device_acc_attr_t *);
extern void xhci_dma_dma_attr(xhci_t *, ddi_dma_attr_t *);
extern void xhci_dma_scratchpad_attr(xhci_t *, ddi_dma_attr_t *);
extern void xhci_dma_transfer_attr(xhci_t *, ddi_dma_attr_t *, uint_t);
extern void xhci_dma_free(xhci_dma_buffer_t *);
extern boolean_t xhci_dma_alloc(xhci_t *, xhci_dma_buffer_t *, ddi_dma_attr_t *,
    ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
extern uint64_t xhci_dma_pa(xhci_dma_buffer_t *);

/*
 * DMA Transfer Ring functions
 */
extern xhci_transfer_t *xhci_transfer_alloc(xhci_t *, xhci_endpoint_t *, size_t,
    uint_t, int);
extern void xhci_transfer_free(xhci_t *, xhci_transfer_t *);
extern void xhci_transfer_copy(xhci_transfer_t *, void *, size_t, boolean_t);
extern int xhci_transfer_sync(xhci_t *, xhci_transfer_t *, uint_t);
extern void xhci_transfer_trb_fill_data(xhci_endpoint_t *, xhci_transfer_t *,
    int, boolean_t);
extern void xhci_transfer_calculate_isoc(xhci_device_t *, xhci_endpoint_t *,
    uint_t, uint_t *, uint_t *);

/*
 * Context (DCBAA, Scratchpad, Slot) functions
 */
extern int xhci_context_init(xhci_t *);
extern void xhci_context_fini(xhci_t *);
extern boolean_t xhci_context_slot_output_init(xhci_t *, xhci_device_t *);
extern void xhci_context_slot_output_fini(xhci_t *, xhci_device_t *);

/*
 * Command Ring Functions
 */
extern int xhci_command_ring_init(xhci_t *);
extern void xhci_command_ring_fini(xhci_t *);
extern boolean_t xhci_command_event_callback(xhci_t *, xhci_trb_t *trb);

extern void xhci_command_init(xhci_command_t *);
extern void xhci_command_fini(xhci_command_t *);

extern int xhci_command_enable_slot(xhci_t *, uint8_t *);
extern int xhci_command_disable_slot(xhci_t *, uint8_t);
extern int xhci_command_set_address(xhci_t *, xhci_device_t *, boolean_t);
extern int xhci_command_configure_endpoint(xhci_t *, xhci_device_t *);
extern int xhci_command_evaluate_context(xhci_t *, xhci_device_t *);
extern int xhci_command_reset_endpoint(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);
extern int xhci_command_set_tr_dequeue(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);
extern int xhci_command_stop_endpoint(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

/*
 * Event Ring Functions
 */
extern int xhci_event_init(xhci_t *);
extern void xhci_event_fini(xhci_t *);
extern boolean_t xhci_event_process(xhci_t *);

/*
 * General Ring functions
 */
extern void xhci_ring_free(xhci_ring_t *);
extern int xhci_ring_reset(xhci_t *, xhci_ring_t *);
extern int xhci_ring_alloc(xhci_t *, xhci_ring_t *);

/*
 * Event Ring (Consumer) oriented functions.
 */
extern xhci_trb_t *xhci_ring_event_advance(xhci_ring_t *);


/*
 * Command and Transfer Ring (Producer) oriented functions.
 */
extern boolean_t xhci_ring_trb_tail_valid(xhci_ring_t *, uint64_t);
extern int xhci_ring_trb_valid_range(xhci_ring_t *, uint64_t, uint_t);

extern boolean_t xhci_ring_trb_space(xhci_ring_t *, uint_t);
extern void xhci_ring_trb_fill(xhci_ring_t *, uint_t, xhci_trb_t *, uint64_t *,
    boolean_t);
extern void xhci_ring_trb_produce(xhci_ring_t *, uint_t);
extern boolean_t xhci_ring_trb_consumed(xhci_ring_t *, uint64_t);
extern void xhci_ring_trb_put(xhci_ring_t *, xhci_trb_t *);
extern void xhci_ring_skip(xhci_ring_t *);
extern void xhci_ring_skip_transfer(xhci_ring_t *, xhci_transfer_t *);

/*
 * MMIO related functions. Note callers are responsible for checking with FM
 * after accessing registers.
 */
extern int xhci_check_regs_acc(xhci_t *);

extern uint8_t xhci_get8(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint16_t xhci_get16(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint32_t xhci_get32(xhci_t *, xhci_reg_type_t, uintptr_t);
extern uint64_t xhci_get64(xhci_t *, xhci_reg_type_t, uintptr_t);

extern void xhci_put8(xhci_t *, xhci_reg_type_t, uintptr_t, uint8_t);
extern void xhci_put16(xhci_t *, xhci_reg_type_t, uintptr_t, uint16_t);
extern void xhci_put32(xhci_t *, xhci_reg_type_t, uintptr_t, uint32_t);
extern void xhci_put64(xhci_t *, xhci_reg_type_t, uintptr_t, uint64_t);

/*
 * Runtime FM related functions
 */
extern void xhci_fm_runtime_reset(xhci_t *);

/*
 * Endpoint related functions
 */
extern int xhci_endpoint_init(xhci_t *, xhci_device_t *,
    usba_pipe_handle_data_t *);
extern void xhci_endpoint_fini(xhci_device_t *, int);
extern int xhci_endpoint_update_default(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
    xhci_endpoint_t *);

extern uint_t xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *);
extern boolean_t xhci_endpoint_is_periodic_in(xhci_endpoint_t *);

extern int xhci_endpoint_quiesce(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
extern int xhci_endpoint_schedule(xhci_t *, xhci_device_t *, xhci_endpoint_t *,
    xhci_transfer_t *, boolean_t);
extern int xhci_endpoint_ring(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
extern boolean_t xhci_endpoint_transfer_callback(xhci_t *, xhci_trb_t *);

/*
 * USB Framework related functions
 */
extern int xhci_hcd_init(xhci_t *);
extern void xhci_hcd_fini(xhci_t *);

/*
 * Root hub related functions
 */
extern int xhci_root_hub_init(xhci_t *);
extern int xhci_root_hub_fini(xhci_t *);
extern int xhci_root_hub_ctrl_req(xhci_t *, usba_pipe_handle_data_t *,
    usb_ctrl_req_t *);
extern void xhci_root_hub_psc_callback(xhci_t *);
extern int xhci_root_hub_intr_root_enable(xhci_t *, usba_pipe_handle_data_t *,
    usb_intr_req_t *);
extern void xhci_root_hub_intr_root_disable(xhci_t *);

/*
 * Logging functions
 */
extern void xhci_log(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
extern void xhci_error(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);

/*
 * Misc. data
 */
extern void *xhci_soft_state;

#ifdef __cplusplus
}
#endif

#endif /* _SYS_USB_XHCI_XHCI_H */