Print this page
MFV: illumos-gate@2aba3acda67326648fd60aaf2bfb4e18ee8c04ed
9816 Multi-TRB xhci transfers should use event data
9817 xhci needs to always set slot context
8550 increase xhci bulk transfer sgl count
9818 xhci_transfer_get_tdsize can return values that are too large
Reviewed by: Alex Wilson <alex.wilson@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Approved by: Joshua M. Clulow <josh@sysmgr.org>
Author: Robert Mustacchi <rm@joyent.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h
+++ new/usr/src/uts/common/sys/usb/hcd/xhci/xhci.h
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 - * Copyright 2016 Joyent, Inc.
13 + * Copyright (c) 2018, Joyent, Inc.
14 14 */
15 15
16 16 #ifndef _SYS_USB_XHCI_XHCI_H
17 17 #define _SYS_USB_XHCI_XHCI_H
18 18
19 19 /*
20 20 * Extensible Host Controller Interface (xHCI) USB Driver
21 21 */
22 22
23 23 #include <sys/conf.h>
24 24 #include <sys/ddi.h>
25 25 #include <sys/sunddi.h>
26 26 #include <sys/taskq_impl.h>
27 27 #include <sys/sysmacros.h>
28 28 #include <sys/usb/hcd/xhci/xhcireg.h>
29 29
30 30 #include <sys/usb/usba.h>
31 31 #include <sys/usb/usba/hcdi.h>
32 32 #include <sys/usb/hubd/hub.h>
33 33 #include <sys/usb/usba/hubdi.h>
34 34 #include <sys/usb/hubd/hubdvar.h>
35 35
36 36
37 37 #ifdef __cplusplus
38 38 extern "C" {
39 39 #endif
40 40
41 41 /*
42 42 * The base segment for DMA attributes was determined to be 4k based on xHCI 1.1
43 43 * / table 54: Data Structure Max Size, Boundary, and Alignment Requirement
44 44 * Summary. This indicates that the required alignment for most things is
45 45 * PAGESIZE, which in our current implementation is required to be 4K. We
46 46 * provide the ring segment value below for the things which need 64K alignment
47 47 *
48 48 * Similarly, in the same table, the maximum required alignment is 64 bytes,
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
49 49 * hence we use that for everything.
50 50 *
51 51 * Next is the scatter/gather lengths. For most of the data structures, we only
52 52 * want to have a single SGL entry, e.g. just a simple flat mapping. For many of
53 53 * our transfers, we use the same logic to simplify the implementation of the
54 54 * driver. However, for bulk transfers, which are the largest by far, we want to
55 55 * be able to leverage SGLs to give us more DMA flexibility.
56 56 *
57 57 * We can transfer up to 64K in one transfer request block (TRB) which
58 58 * corresponds to a single SGL entry. Each ring we create is a single page in
59 - * size and will support at most 256 TRBs. We've selected to use up to 8 SGLs
60 - * for these transfer cases. This allows us to put up to 512 KiB in a given
61 - * transfer request and in the worst case, we can have about 30 of them
62 - * outstanding. Experimentally, this has proven to be sufficient for most of the
63 - * drivers that we support today.
59 + * size and will support at most 256 TRBs. To try and give the operating system
60 + * flexibility when allocating DMA transfers, we've opted to allow up to 63
61 + * SGLs. Because there isn't a good way to support DMA windows with the xHCI
62 + * controller design, if this number is too small then DMA allocations and
63 + * binding might fail. If the DMA binding fails, the transfer will fail.
64 + *
65 + * The reason that we use 63 SGLs and not the expected 64 is that we always need
66 + * to allocate an additional TRB for the event data. This leaves us with a
67 + * nicely divisible number of entries.
68 + *
69 + * The final piece of this is the maximum sized transfer that the driver
70 + * advertises to the broader framework. This is currently sized at 512 KiB. For
71 + * reference the ehci driver sized this value at 640 KiB. It's important to
72 + * understand that this isn't reflected in the DMA attribute limitation, because
73 + * it's not an attribute of the hardware. Experimentally, this has proven to be
74 + * sufficient for most of the drivers that we support today. When considering
75 + * increasing this number, please note the impact that might have on the
76 + * required number of DMA SGL entries required to satisfy the allocation.
77 + *
78 + * The value of 512 KiB was originally based on the number of SGLs we supported
79 + * multiplied by the maximum transfer size. The original number of
80 + * XHCI_TRANSFER_DMA_SGL was 8. The 512 KiB value was based upon taking the
81 + * number of SGLs and assuming that each TRB used its maximum transfer size of
82 + * 64 KiB.
64 83 */
65 -#define XHCI_TRB_MAX_TRANSFER 65536
84 +#define XHCI_TRB_MAX_TRANSFER 65536 /* 64 KiB */
66 85 #define XHCI_DMA_ALIGN 64
67 86 #define XHCI_DEF_DMA_SGL 1
68 -#define XHCI_TRANSFER_DMA_SGL 8
69 -#define XHCI_MAX_TRANSFER (XHCI_TRB_MAX_TRANSFER * XHCI_TRANSFER_DMA_SGL)
70 -#define XHCI_DMA_STRUCT_SIZE 4096
87 +#define XHCI_TRANSFER_DMA_SGL 63
88 +#define XHCI_MAX_TRANSFER 524288 /* 512 KiB */
71 89
72 90 /*
73 91 * Properties and values for rerouting ehci ports to xhci.
74 92 */
75 93 #define XHCI_PROP_REROUTE_DISABLE 0
76 94 #define XHCI_PROP_REROUTE_DEFAULT 1
77 95
78 96 /*
79 97 * This number is a bit made up. Truthfully, the API here isn't the most useful
80 98 * for what we need to define as it should really be based on the endpoint that
81 99 * we're interested in rather than the device as a whole.
82 100 *
83 101 * We're basically being asked how many TRBs we're willing to schedule in one
84 102 * go. There's no great way to come up with this number, so we basically are
85 103 * making up something such that we use up a good portion of a ring, but not too
86 104 * much of it.
87 105 */
88 106 #define XHCI_ISOC_MAX_TRB 64
89 107
90 108 #ifdef DEBUG
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
91 109 #define XHCI_DMA_SYNC(dma, flag) VERIFY0(ddi_dma_sync( \
92 110 (dma).xdb_dma_handle, 0, 0, \
93 111 (flag)))
94 112 #else
95 113 #define XHCI_DMA_SYNC(dma, flag) ((void) ddi_dma_sync( \
96 114 (dma).xdb_dma_handle, 0, 0, \
97 115 (flag)))
98 116 #endif
99 117
100 118 /*
119 + * TRBs need to indicate the number of remaining USB packets in the overall
120 + * transfer. This is a 5-bit value, which means that the maximum value we can
 121 + * store in that TRB field is 31.
122 + */
123 +#define XHCI_MAX_TDSIZE 31
124 +
125 +/*
101 126 * This defines a time in 2-ms ticks that is required to wait for the controller
102 127 * to be ready to go. Section 5.4.8 of the XHCI specification in the description
103 128 * of the PORTSC register indicates that the upper bound is 20 ms. Therefore the
104 129 * number of ticks is 10.
105 130 */
106 131 #define XHCI_POWER_GOOD 10
107 132
108 133 /*
109 134 * Definitions to determine the default number of interrupts. Note that we only
110 135 * bother with a single interrupt at this time, though we've arranged the driver
111 136 * to make it possible to request more if, for some unlikely reason, it becomes
112 137 * necessary.
113 138 */
114 139 #define XHCI_NINTR 1
115 140
116 141 /*
117 142 * Default interrupt modulation value. This enables us to have 4000 interrupts /
118 143 * second. This is supposed to be the default value of the controller. See xHCI
119 144 * 1.1 / 4.17.2 for more information.
120 145 */
121 -#define XHCI_IMOD_DEFAULT 0x000003F8U
146 +#define XHCI_IMOD_DEFAULT 0x000003F8U
122 147
123 148 /*
124 149 * Definitions that surround the default values used in various contexts. These
125 150 * come from various parts of the xHCI specification. In general, see xHCI 1.1 /
126 151 * 4.8.2. Note that the MPS_MASK is used for ISOCH and INTR endpoints which have
127 152 * different sizes.
128 153 *
129 154 * The burst member is a bit more complicated. By default for USB 2 devices, it
130 155 * only matters for ISOCH and INTR endpoints and so we use the macros below to
131 156 * pull it out of the endpoint description's max packet field. For USB 3, it
132 157 * matters for non-control endpoints. However, it comes out of a companion
133 158 * description.
134 159 *
135 160 * By default the mult member is zero for all cases except for super speed
136 161 * ISOCH endpoints, where it comes from the companion descriptor.
137 162 */
138 163 #define XHCI_CONTEXT_DEF_CERR 3
139 164 #define XHCI_CONTEXT_ISOCH_CERR 0
140 165 #define XHCI_CONTEXT_MPS_MASK 0x07ff
141 166 #define XHCI_CONTEXT_BURST_MASK 0x1800
142 167 #define XHCI_CONTEXT_BURST_SHIFT 11
143 168 #define XHCI_CONTEXT_DEF_MULT 0
144 169 #define XHCI_CONTEXT_DEF_MAX_ESIT 0
145 170 #define XHCI_CONTEXT_DEF_CTRL_ATL 8
146 171
147 172 /*
148 173 * This number represents the number of transfers that we'll set up for a given
149 174 * interrupt transfer. Note that the idea here is that we'll want to allocate a
150 175 * certain number of transfers to basically ensure that we'll always be able to
151 176 * have a transfer available, even if the system is a bit caught up in trying to
152 177 * process it and for some reason we can't fire the interrupt. As such, we
153 178 * basically want to have enough available that at the fastest interval (125 us)
154 179 * that we have enough. So in this case we choose 8, with the assumption that we
155 180 * should be able to process at least one in a given millisecond. Note that this
156 181 * is not based in fact and is really just as much a guess and a hope.
157 182 *
158 183 * While we could then use less resources for other interrupt transfers that are
159 184 * slower, starting with uniform resource usage will make things a bit easier.
160 185 */
161 186 #define XHCI_INTR_IN_NTRANSFERS 8
162 187
163 188 /*
164 189 * This number represents the number of xhci_transfer_t structures that we'll
165 190 * set up for a given isochronous transfer polling request. A given isochronous
166 191 * transfer may actually have multiple units of time associated with it. As
167 192 * such, we basically want to treat this like a case of classic double
168 193 * buffering. We have one ready to go while the other is being filled up. This
169 194 * will compensate for additional latency in the system. This is smaller than
170 195 * the Interrupt IN transfer case above as many callers may ask for multiple
171 196 * intervals in a single request.
172 197 */
173 198 #define XHCI_ISOC_IN_NTRANSFERS 2
174 199
175 200 #define XHCI_PERIODIC_IN_NTRANSFERS \
176 201 MAX(XHCI_ISOC_IN_NTRANSFERS, XHCI_INTR_IN_NTRANSFERS)
177 202
178 203 /*
179 204 * Mask for a route string which is a 20-bit value.
180 205 */
181 206 #define XHCI_ROUTE_MASK(x) ((x) & 0xfffff)
182 207
183 208 /*
184 209 * This is the default tick that we use for timeouts while endpoints have
185 210 * outstanding, active, non-periodic transfers. We choose one second as the USBA
186 211 * specifies timeouts in units of seconds. Note that this is in microseconds, so
187 212 * it can be fed into drv_usectohz().
188 213 */
189 214 #define XHCI_TICK_TIMEOUT_US (MICROSEC)
190 215
|
↓ open down ↓ |
59 lines elided |
↑ open up ↑ |
191 216 /*
192 217 * Set of bits that we need one of to indicate that this port has something
193 218 * interesting on it.
194 219 */
195 220 #define XHCI_HUB_INTR_CHANGE_MASK (XHCI_PS_CSC | XHCI_PS_PEC | \
196 221 XHCI_PS_WRC | XHCI_PS_OCC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC)
197 222
198 223 /*
199 224 * These represent known issues with various xHCI controllers.
200 225 *
201 - * XHCI_QUIRK_NO_MSI MSI support on this controller is known to be
202 - * broken.
226 + * XHCI_QUIRK_NO_MSI MSI support on this controller is known to be
227 + * broken.
203 228 *
 204 - * XHCI_QUIRK_32_ONLY	Only use 32-bit DMA addresses with this
205 - * controller.
 229 + * XHCI_QUIRK_32_ONLY	Only use 32-bit DMA addresses with this
230 + * controller.
206 231 *
207 - * XHCI_QUIRK_INTC_EHCI This is an Intel platform which supports
208 - * rerouting ports between EHCI and xHCI
209 - * controllers on the platform.
232 + * XHCI_QUIRK_INTC_EHCI This is an Intel platform which supports
233 + * rerouting ports between EHCI and xHCI
234 + * controllers on the platform.
210 235 */
211 236 typedef enum xhci_quirk {
212 237 XHCI_QUIRK_NO_MSI = 0x01,
213 238 XHCI_QUIRK_32_ONLY = 0x02,
214 239 XHCI_QUIRK_INTC_EHCI = 0x04
215 240 } xhci_quirk_t;
216 241
217 242 /*
218 243 * xHCI capability parameter flags. These are documented in xHCI 1.1 / 5.3.6.
219 244 */
220 245 typedef enum xhci_cap_flags {
221 - XCAP_AC64 = 0x001,
246 + XCAP_AC64 = 0x001,
222 247 XCAP_BNC = 0x002,
223 248 XCAP_CSZ = 0x004,
224 249 XCAP_PPC = 0x008,
225 250 XCAP_PIND = 0x010,
226 251 XCAP_LHRC = 0x020,
227 252 XCAP_LTC = 0x040,
228 253 XCAP_NSS = 0x080,
229 254 XCAP_PAE = 0x100,
230 255 XCAP_SPC = 0x200,
231 256 XCAP_SEC = 0x400,
232 257 XCAP_CFC = 0x800
233 258 } xchi_cap_flags_t;
234 259
235 260 /*
236 261 * Second set of capabilities, these are documented in xHCI 1.1 / 5.3.9.
237 262 */
238 263 typedef enum xhci_cap2_flags {
239 264 XCAP2_U3C = 0x01,
240 265 XCAP2_CMC = 0x02,
241 266 XCAP2_FMC = 0x04,
242 267 XCAP2_CTC = 0x08,
243 268 XCAP2_LEC = 0x10,
244 269 XCAP2_CIC = 0x20
245 270 } xhci_cap2_flags_t;
246 271
247 272 /*
248 273 * These represent and store the various capability registers that we'll need to
249 274 * use. In addition, we stash a few other versioning related bits here. Note
250 275 * that we cache more information than we might need so that we have it for
251 276 * debugging purposes.
252 277 */
253 278 typedef struct xhci_capability {
254 279 uint8_t xcap_usb_vers;
255 280 uint16_t xcap_hci_vers;
256 281 uint32_t xcap_pagesize;
257 282 uint8_t xcap_max_slots;
258 283 uint16_t xcap_max_intrs;
259 284 uint8_t xcap_max_ports;
260 285 boolean_t xcap_ist_micro;
261 286 uint8_t xcap_ist;
262 287 uint16_t xcap_max_esrt;
263 288 boolean_t xcap_scratch_restore;
264 289 uint16_t xcap_max_scratch;
265 290 uint8_t xcap_u1_lat;
266 291 uint16_t xcap_u2_lat;
267 292 xchi_cap_flags_t xcap_flags;
268 293 uint8_t xcap_max_psa;
269 294 uint16_t xcap_xecp_off;
270 295 xhci_cap2_flags_t xcap_flags2;
271 296 int xcap_intr_types;
272 297 } xhci_capability_t;
273 298
274 299 /*
275 300 * This represents a single logical DMA allocation. For the vast majority of
276 301 * non-transfer cases, it only represents a single DMA buffer and not a
277 302 * scatter-gather list.
278 303 */
279 304 typedef struct xhci_dma_buffer {
280 305 caddr_t xdb_va; /* Buffer VA */
281 306 size_t xdb_len; /* Buffer logical len */
282 307 ddi_acc_handle_t xdb_acc_handle; /* Access handle */
283 308 ddi_dma_handle_t xdb_dma_handle; /* DMA handle */
284 309 int xdb_ncookies; /* Number of actual cookies */
285 310 ddi_dma_cookie_t xdb_cookies[XHCI_TRANSFER_DMA_SGL];
286 311 } xhci_dma_buffer_t;
287 312
288 313 /*
289 314 * This is a single transfer descriptor. It's packed to match the hardware
290 315 * layout.
291 316 */
292 317 #pragma pack(1)
293 318 typedef struct xhci_trb {
294 319 uint64_t trb_addr;
295 320 uint32_t trb_status;
296 321 uint32_t trb_flags;
297 322 } xhci_trb_t;
298 323 #pragma pack()
299 324
300 325 /*
301 326 * This represents a single transfer that we want to allocate and perform.
302 327 */
|
↓ open down ↓ |
71 lines elided |
↑ open up ↑ |
303 328 typedef struct xhci_transfer {
304 329 list_node_t xt_link;
305 330 hrtime_t xt_sched_time;
306 331 xhci_dma_buffer_t xt_buffer;
307 332 uint_t xt_ntrbs;
308 333 uint_t xt_short;
309 334 uint_t xt_timeout;
310 335 usb_cr_t xt_cr;
311 336 boolean_t xt_data_tohost;
312 337 xhci_trb_t *xt_trbs;
338 + uint64_t *xt_trbs_pa;
313 339 usb_isoc_pkt_descr_t *xt_isoc;
314 340 usb_opaque_t xt_usba_req;
315 341 } xhci_transfer_t;
316 342
317 343 /*
318 344 * This represents a ring in xHCI, upon which event, transfer, and command TRBs
319 345 * are scheduled.
320 346 */
321 347 typedef struct xhci_ring {
322 348 xhci_dma_buffer_t xr_dma;
323 349 uint_t xr_ntrb;
324 350 xhci_trb_t *xr_trb;
325 351 uint_t xr_head;
326 352 uint_t xr_tail;
327 353 uint8_t xr_cycle;
328 354 } xhci_ring_t;
329 355
330 356 /*
331 357 * This structure is used to represent the xHCI Device Context Base Address
332 358 * Array. It's defined in section 6.1 of the specification and is required for
333 359 * the controller to start.
334 360 *
335 361 * The maximum number of slots supported is always 256, therefore we size this
336 362 * structure at its maximum.
337 363 */
338 364 #define XHCI_MAX_SLOTS 256
339 365 #define XHCI_DCBAA_SCRATCHPAD_INDEX 0
340 366
341 367 typedef struct xhci_dcbaa {
342 368 uint64_t *xdc_base_addrs;
343 369 xhci_dma_buffer_t xdc_dma;
344 370 } xhci_dcbaa_t;
345 371
346 372 typedef struct xhci_scratchpad {
347 373 uint64_t *xsp_addrs;
348 374 xhci_dma_buffer_t xsp_addr_dma;
349 375 xhci_dma_buffer_t *xsp_scratch_dma;
350 376 } xhci_scratchpad_t;
351 377
352 378 /*
353 379 * Contexts. These structures are inserted into the DCBAA above and are used for
354 380 * describing the state of the system. Note, that while many of these are
355 381 * 32-bytes in size, the xHCI specification defines that they'll be extended to
356 382 * 64-bytes with all the extra bytes as zeros if the CSZ flag is set in the
357 383 * HCCPARAMS1 register, e.g. we have the flag XCAP_CSZ set.
358 384 *
359 385 * The device context covers the slot context and 31 endpoints.
360 386 */
361 387 #define XHCI_DEVICE_CONTEXT_32 1024
362 388 #define XHCI_DEVICE_CONTEXT_64 2048
363 389 #define XHCI_NUM_ENDPOINTS 31
364 390 #define XHCI_DEFAULT_ENDPOINT 0
365 391
366 392 #pragma pack(1)
367 393 typedef struct xhci_slot_context {
368 394 uint32_t xsc_info;
369 395 uint32_t xsc_info2;
370 396 uint32_t xsc_tt;
371 397 uint32_t xsc_state;
372 398 uint32_t xsc_reserved[4];
373 399 } xhci_slot_context_t;
374 400
375 401 typedef struct xhci_endpoint_context {
376 402 uint32_t xec_info;
377 403 uint32_t xec_info2;
378 404 uint64_t xec_dequeue;
379 405 uint32_t xec_txinfo;
380 406 uint32_t xec_reserved[3];
381 407 } xhci_endpoint_context_t;
382 408
383 409 typedef struct xhci_input_context {
384 410 uint32_t xic_drop_flags;
385 411 uint32_t xic_add_flags;
386 412 uint32_t xic_reserved[6];
387 413 } xhci_input_context_t;
388 414 #pragma pack()
389 415
390 416 /*
391 417 * Definitions and structures for maintaining the event ring.
392 418 */
393 419 #define XHCI_EVENT_NSEGS 1
394 420
395 421 #pragma pack(1)
396 422 typedef struct xhci_event_segment {
397 423 uint64_t xes_addr;
398 424 uint16_t xes_size;
399 425 uint16_t xes_rsvd0;
400 426 uint32_t xes_rsvd1;
401 427 } xhci_event_segment_t;
402 428 #pragma pack()
403 429
404 430 typedef struct xhci_event_ring {
405 431 xhci_event_segment_t *xev_segs;
406 432 xhci_dma_buffer_t xev_dma;
407 433 xhci_ring_t xev_ring;
408 434 } xhci_event_ring_t;
409 435
410 436 typedef enum xhci_command_ring_state {
411 437 XHCI_COMMAND_RING_IDLE = 0x00,
412 438 XHCI_COMMAND_RING_RUNNING = 0x01,
413 439 XHCI_COMMAND_RING_ABORTING = 0x02,
414 440 XHCI_COMMAND_RING_ABORT_DONE = 0x03
415 441 } xhci_command_ring_state_t;
416 442
417 443 typedef struct xhci_command_ring {
418 444 xhci_ring_t xcr_ring;
419 445 kmutex_t xcr_lock;
|
↓ open down ↓ |
97 lines elided |
↑ open up ↑ |
420 446 kcondvar_t xcr_cv;
421 447 list_t xcr_commands;
422 448 timeout_id_t xcr_timeout;
423 449 xhci_command_ring_state_t xcr_state;
424 450 } xhci_command_ring_t;
425 451
426 452 /*
427 453 * Individual command states.
428 454 *
429 455 * XHCI_COMMAND_S_INIT The command has yet to be inserted into the
430 - * command ring.
456 + * command ring.
431 457 *
432 458 * XHCI_COMMAND_S_QUEUED The command is queued in the command ring.
433 459 *
434 460 * XHCI_COMMAND_S_RECEIVED A command completion for this was received.
435 461 *
436 462 * XHCI_COMMAND_S_DONE The command has been executed. Note that it may
437 - * have been aborted.
463 + * have been aborted.
438 464 *
439 465 * XHCI_COMMAND_S_RESET The ring is being reset due to a fatal error and
440 - * this command has been removed from the ring.
441 - * This means it has been aborted, but it was not
442 - * the cause of the abort.
466 + * this command has been removed from the ring.
467 + * This means it has been aborted, but it was not
468 + * the cause of the abort.
443 469 *
444 470 * Note, when adding states, anything after XHCI_COMMAND_S_DONE implies that
445 471 * upon reaching this state, it is no longer in the ring.
446 472 */
447 473 typedef enum xhci_command_state {
448 474 XHCI_COMMAND_S_INIT = 0x00,
449 475 XHCI_COMMAND_S_QUEUED = 0x01,
450 476 XHCI_COMMAND_S_RECEIVED = 0x02,
451 477 XHCI_COMMAND_S_DONE = 0x03,
452 478 XHCI_COMMAND_S_RESET = 0x04
453 479 } xhci_command_state_t;
454 480
455 481 /*
456 482 * The TRB contents here are always kept in host byte order and are transformed
457 483 * to little endian when actually scheduled on the ring.
458 484 */
459 485 typedef struct xhci_command {
460 486 list_node_t xco_link;
461 487 kcondvar_t xco_cv;
462 488 xhci_trb_t xco_req;
463 489 xhci_trb_t xco_res;
464 490 xhci_command_state_t xco_state;
465 491 } xhci_command_t;
466 492
467 493 typedef enum xhci_endpoint_state {
468 494 XHCI_ENDPOINT_PERIODIC = 0x01,
469 495 XHCI_ENDPOINT_HALTED = 0x02,
470 496 XHCI_ENDPOINT_QUIESCE = 0x04,
471 497 XHCI_ENDPOINT_TIMED_OUT = 0x08,
472 498 /*
473 499 * This is a composite of states that we need to watch for. We don't
474 500 * want to allow ourselves to set one of these flags while one of them
475 501 * is currently active.
476 502 */
477 503 XHCI_ENDPOINT_SERIALIZE = 0x0c,
478 504 /*
479 505 * This is a composite of states that we need to make sure that if set,
480 506 * we do not schedule activity on the ring.
481 507 */
482 508 XHCI_ENDPOINT_DONT_SCHEDULE = 0x0e,
483 509 /*
 484 510 	 * This endpoint is being torn down and should make sure it de-schedules
485 511 * itself.
486 512 */
487 513 XHCI_ENDPOINT_TEARDOWN = 0x10
488 514 } xhci_endpoint_state_t;
489 515
490 516 /*
491 517 * Forwards required for the endpoint
492 518 */
493 519 struct xhci_device;
494 520 struct xhci;
495 521
496 522 typedef struct xhci_endpoint {
497 523 struct xhci *xep_xhci;
498 524 struct xhci_device *xep_xd;
499 525 uint_t xep_num;
500 526 uint_t xep_type;
501 527 xhci_endpoint_state_t xep_state;
502 528 kcondvar_t xep_state_cv;
503 529 timeout_id_t xep_timeout;
504 530 list_t xep_transfers;
505 531 usba_pipe_handle_data_t *xep_pipe;
506 532 xhci_ring_t xep_ring;
507 533 } xhci_endpoint_t;
508 534
509 535 typedef struct xhci_device {
510 536 list_node_t xd_link;
511 537 usb_port_t xd_port;
512 538 uint8_t xd_slot;
513 539 boolean_t xd_addressed;
514 540 usba_device_t *xd_usbdev;
515 541 xhci_dma_buffer_t xd_ictx;
516 542 kmutex_t xd_imtx; /* Protects input contexts */
517 543 xhci_input_context_t *xd_input;
518 544 xhci_slot_context_t *xd_slotin;
519 545 xhci_endpoint_context_t *xd_endin[XHCI_NUM_ENDPOINTS];
520 546 xhci_dma_buffer_t xd_octx;
521 547 xhci_slot_context_t *xd_slotout;
522 548 xhci_endpoint_context_t *xd_endout[XHCI_NUM_ENDPOINTS];
523 549 xhci_endpoint_t *xd_endpoints[XHCI_NUM_ENDPOINTS];
524 550 } xhci_device_t;
525 551
526 552 typedef enum xhci_periodic_state {
527 553 XHCI_PERIODIC_POLL_IDLE = 0x0,
528 554 XHCI_PERIODIC_POLL_ACTIVE,
529 555 XHCI_PERIODIC_POLL_NOMEM,
530 556 XHCI_PERIODIC_POLL_STOPPING
531 557 } xhci_periodic_state_t;
532 558
533 559 typedef struct xhci_periodic_pipe {
534 560 xhci_periodic_state_t xpp_poll_state;
535 561 usb_opaque_t xpp_usb_req;
536 562 size_t xpp_tsize;
537 563 uint_t xpp_ntransfers;
538 564 xhci_transfer_t *xpp_transfers[XHCI_PERIODIC_IN_NTRANSFERS];
539 565 } xhci_periodic_pipe_t;
540 566
541 567 typedef struct xhci_pipe {
542 568 list_node_t xp_link;
543 569 hrtime_t xp_opentime;
544 570 usba_pipe_handle_data_t *xp_pipe;
545 571 xhci_endpoint_t *xp_ep;
546 572 xhci_periodic_pipe_t xp_periodic;
547 573 } xhci_pipe_t;
548 574
549 575 typedef struct xhci_usba {
550 576 usba_hcdi_ops_t *xa_ops;
551 577 ddi_dma_attr_t xa_dma_attr;
552 578 usb_dev_descr_t xa_dev_descr;
553 579 usb_ss_hub_descr_t xa_hub_descr;
554 580 usba_pipe_handle_data_t *xa_intr_cb_ph;
555 581 usb_intr_req_t *xa_intr_cb_req;
556 582 list_t xa_devices;
557 583 list_t xa_pipes;
558 584 } xhci_usba_t;
559 585
560 586 typedef enum xhci_attach_seq {
561 587 XHCI_ATTACH_FM = 0x1 << 0,
562 588 XHCI_ATTACH_PCI_CONFIG = 0x1 << 1,
563 589 XHCI_ATTACH_REGS_MAP = 0x1 << 2,
564 590 XHCI_ATTACH_INTR_ALLOC = 0x1 << 3,
565 591 XHCI_ATTACH_INTR_ADD = 0x1 << 4,
566 592 XHCI_ATTACH_SYNCH = 0x1 << 5,
567 593 XHCI_ATTACH_INTR_ENABLE = 0x1 << 6,
568 594 XHCI_ATTACH_STARTED = 0x1 << 7,
569 595 XHCI_ATTACH_USBA = 0x1 << 8,
570 596 XHCI_ATTACH_ROOT_HUB = 0x1 << 9
571 597 } xhci_attach_seq_t;
572 598
573 599 typedef enum xhci_state_flags {
574 600 XHCI_S_ERROR = 0x1 << 0
575 601 } xhci_state_flags_t;
576 602
577 603 typedef struct xhci {
578 604 dev_info_t *xhci_dip;
579 605 xhci_attach_seq_t xhci_seq;
580 606 int xhci_fm_caps;
581 607 ddi_acc_handle_t xhci_cfg_handle;
582 608 uint16_t xhci_vendor_id;
583 609 uint16_t xhci_device_id;
584 610 caddr_t xhci_regs_base;
585 611 ddi_acc_handle_t xhci_regs_handle;
586 612 uint_t xhci_regs_capoff;
587 613 uint_t xhci_regs_operoff;
588 614 uint_t xhci_regs_runoff;
589 615 uint_t xhci_regs_dooroff;
590 616 xhci_capability_t xhci_caps;
591 617 xhci_quirk_t xhci_quirks;
592 618 ddi_intr_handle_t xhci_intr_hdl;
593 619 int xhci_intr_num;
594 620 int xhci_intr_type;
595 621 uint_t xhci_intr_pri;
596 622 int xhci_intr_caps;
597 623 xhci_dcbaa_t xhci_dcbaa;
598 624 xhci_scratchpad_t xhci_scratchpad;
599 625 xhci_command_ring_t xhci_command;
600 626 xhci_event_ring_t xhci_event;
601 627 taskq_ent_t xhci_tqe;
602 628 kmutex_t xhci_lock;
603 629 kcondvar_t xhci_statecv;
604 630 xhci_state_flags_t xhci_state;
605 631 xhci_usba_t xhci_usba;
606 632 } xhci_t;
607 633
608 634 /*
609 635 * The xHCI memory mapped registers come in four different categories. The
610 636 * offset to them is variable. These represent the given register set that we're
611 637 * after.
612 638 */
613 639 typedef enum xhci_reg_type {
614 640 XHCI_R_CAP,
615 641 XHCI_R_OPER,
616 642 XHCI_R_RUN,
617 643 XHCI_R_DOOR
618 644 } xhci_reg_type_t;
619 645
620 646 /*
621 647 * Quirks related functions
622 648 */
623 649 extern void xhci_quirks_populate(xhci_t *);
624 650 extern void xhci_reroute_intel(xhci_t *);
625 651
626 652 /*
627 653 * Interrupt related functions
628 654 */
629 655 extern uint_t xhci_intr(caddr_t, caddr_t);
630 656 extern boolean_t xhci_ddi_intr_disable(xhci_t *);
631 657 extern boolean_t xhci_ddi_intr_enable(xhci_t *);
632 658 extern int xhci_intr_conf(xhci_t *);
633 659
634 660 /*
635 661 * DMA related functions
636 662 */
637 663 extern int xhci_check_dma_handle(xhci_t *, xhci_dma_buffer_t *);
638 664 extern void xhci_dma_acc_attr(xhci_t *, ddi_device_acc_attr_t *);
639 665 extern void xhci_dma_dma_attr(xhci_t *, ddi_dma_attr_t *);
640 666 extern void xhci_dma_scratchpad_attr(xhci_t *, ddi_dma_attr_t *);
|
↓ open down ↓ |
188 lines elided |
↑ open up ↑ |
641 667 extern void xhci_dma_transfer_attr(xhci_t *, ddi_dma_attr_t *, uint_t);
642 668 extern void xhci_dma_free(xhci_dma_buffer_t *);
643 669 extern boolean_t xhci_dma_alloc(xhci_t *, xhci_dma_buffer_t *, ddi_dma_attr_t *,
644 670 ddi_device_acc_attr_t *, boolean_t, size_t, boolean_t);
645 671 extern uint64_t xhci_dma_pa(xhci_dma_buffer_t *);
646 672
647 673 /*
648 674 * DMA Transfer Ring functions
649 675 */
650 676 extern xhci_transfer_t *xhci_transfer_alloc(xhci_t *, xhci_endpoint_t *, size_t,
651 - int, int);
677 + uint_t, int);
652 678 extern void xhci_transfer_free(xhci_t *, xhci_transfer_t *);
653 679 extern void xhci_transfer_copy(xhci_transfer_t *, void *, size_t, boolean_t);
654 680 extern int xhci_transfer_sync(xhci_t *, xhci_transfer_t *, uint_t);
655 681 extern void xhci_transfer_trb_fill_data(xhci_endpoint_t *, xhci_transfer_t *,
656 682 int, boolean_t);
657 683 extern void xhci_transfer_calculate_isoc(xhci_device_t *, xhci_endpoint_t *,
658 684 uint_t, uint_t *, uint_t *);
659 685
660 686 /*
661 687 * Context (DCBAA, Scratchpad, Slot) functions
662 688 */
663 689 extern int xhci_context_init(xhci_t *);
664 690 extern void xhci_context_fini(xhci_t *);
665 691 extern boolean_t xhci_context_slot_output_init(xhci_t *, xhci_device_t *);
666 692 extern void xhci_context_slot_output_fini(xhci_t *, xhci_device_t *);
667 693
668 694 /*
669 695 * Command Ring Functions
670 696 */
671 697 extern int xhci_command_ring_init(xhci_t *);
672 698 extern void xhci_command_ring_fini(xhci_t *);
673 699 extern boolean_t xhci_command_event_callback(xhci_t *, xhci_trb_t *trb);
674 700
675 701 extern void xhci_command_init(xhci_command_t *);
676 702 extern void xhci_command_fini(xhci_command_t *);
677 703
678 704 extern int xhci_command_enable_slot(xhci_t *, uint8_t *);
679 705 extern int xhci_command_disable_slot(xhci_t *, uint8_t);
680 706 extern int xhci_command_set_address(xhci_t *, xhci_device_t *, boolean_t);
681 707 extern int xhci_command_configure_endpoint(xhci_t *, xhci_device_t *);
682 708 extern int xhci_command_evaluate_context(xhci_t *, xhci_device_t *);
683 709 extern int xhci_command_reset_endpoint(xhci_t *, xhci_device_t *,
684 710 xhci_endpoint_t *);
685 711 extern int xhci_command_set_tr_dequeue(xhci_t *, xhci_device_t *,
686 712 xhci_endpoint_t *);
687 713 extern int xhci_command_stop_endpoint(xhci_t *, xhci_device_t *,
688 714 xhci_endpoint_t *);
689 715
690 716 /*
691 717 * Event Ring Functions
692 718 */
693 719 extern int xhci_event_init(xhci_t *);
694 720 extern void xhci_event_fini(xhci_t *);
695 721 extern boolean_t xhci_event_process(xhci_t *);
696 722
697 723 /*
698 724 * General Ring functions
699 725 */
700 726 extern void xhci_ring_free(xhci_ring_t *);
701 727 extern int xhci_ring_reset(xhci_t *, xhci_ring_t *);
702 728 extern int xhci_ring_alloc(xhci_t *, xhci_ring_t *);
703 729
704 730 /*
705 731 * Event Ring (Consumer) oriented functions.
706 732 */
|
↓ open down ↓ |
45 lines elided |
↑ open up ↑ |
707 733 extern xhci_trb_t *xhci_ring_event_advance(xhci_ring_t *);
708 734
709 735
710 736 /*
711 737 * Command and Transfer Ring (Producer) oriented functions.
712 738 */
713 739 extern boolean_t xhci_ring_trb_tail_valid(xhci_ring_t *, uint64_t);
714 740 extern int xhci_ring_trb_valid_range(xhci_ring_t *, uint64_t, uint_t);
715 741
716 742 extern boolean_t xhci_ring_trb_space(xhci_ring_t *, uint_t);
717 -extern void xhci_ring_trb_fill(xhci_ring_t *, uint_t, xhci_trb_t *, boolean_t);
743 +extern void xhci_ring_trb_fill(xhci_ring_t *, uint_t, xhci_trb_t *, uint64_t *,
744 + boolean_t);
718 745 extern void xhci_ring_trb_produce(xhci_ring_t *, uint_t);
719 746 extern boolean_t xhci_ring_trb_consumed(xhci_ring_t *, uint64_t);
720 747 extern void xhci_ring_trb_put(xhci_ring_t *, xhci_trb_t *);
721 748 extern void xhci_ring_skip(xhci_ring_t *);
722 749 extern void xhci_ring_skip_transfer(xhci_ring_t *, xhci_transfer_t *);
723 750
724 751 /*
725 752 * MMIO related functions. Note callers are responsible for checking with FM
726 753 * after accessing registers.
727 754 */
728 755 extern int xhci_check_regs_acc(xhci_t *);
729 756
730 757 extern uint8_t xhci_get8(xhci_t *, xhci_reg_type_t, uintptr_t);
731 758 extern uint16_t xhci_get16(xhci_t *, xhci_reg_type_t, uintptr_t);
732 759 extern uint32_t xhci_get32(xhci_t *, xhci_reg_type_t, uintptr_t);
733 760 extern uint64_t xhci_get64(xhci_t *, xhci_reg_type_t, uintptr_t);
734 761
735 762 extern void xhci_put8(xhci_t *, xhci_reg_type_t, uintptr_t, uint8_t);
736 763 extern void xhci_put16(xhci_t *, xhci_reg_type_t, uintptr_t, uint16_t);
737 764 extern void xhci_put32(xhci_t *, xhci_reg_type_t, uintptr_t, uint32_t);
738 765 extern void xhci_put64(xhci_t *, xhci_reg_type_t, uintptr_t, uint64_t);
739 766
740 767 /*
741 768 * Runtime FM related functions
742 769 */
743 770 extern void xhci_fm_runtime_reset(xhci_t *);
744 771
745 772 /*
746 773 * Endpoint related functions
747 774 */
748 775 extern int xhci_endpoint_init(xhci_t *, xhci_device_t *,
749 776 usba_pipe_handle_data_t *);
750 777 extern void xhci_endpoint_fini(xhci_device_t *, int);
751 778 extern int xhci_endpoint_update_default(xhci_t *, xhci_device_t *,
752 779 xhci_endpoint_t *);
753 780
754 781 extern int xhci_endpoint_setup_default_context(xhci_t *, xhci_device_t *,
755 782 xhci_endpoint_t *);
756 783
757 784 extern uint_t xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *);
758 785 extern boolean_t xhci_endpoint_is_periodic_in(xhci_endpoint_t *);
759 786
760 787 extern int xhci_endpoint_quiesce(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
761 788 extern int xhci_endpoint_schedule(xhci_t *, xhci_device_t *, xhci_endpoint_t *,
762 789 xhci_transfer_t *, boolean_t);
763 790 extern int xhci_endpoint_ring(xhci_t *, xhci_device_t *, xhci_endpoint_t *);
764 791 extern boolean_t xhci_endpoint_transfer_callback(xhci_t *, xhci_trb_t *);
765 792
766 793 /*
767 794 * USB Framework related functions
768 795 */
769 796 extern int xhci_hcd_init(xhci_t *);
770 797 extern void xhci_hcd_fini(xhci_t *);
771 798
772 799 /*
773 800 * Root hub related functions
774 801 */
775 802 extern int xhci_root_hub_init(xhci_t *);
776 803 extern int xhci_root_hub_fini(xhci_t *);
777 804 extern int xhci_root_hub_ctrl_req(xhci_t *, usba_pipe_handle_data_t *,
778 805 usb_ctrl_req_t *);
779 806 extern void xhci_root_hub_psc_callback(xhci_t *);
780 807 extern int xhci_root_hub_intr_root_enable(xhci_t *, usba_pipe_handle_data_t *,
781 808 usb_intr_req_t *);
782 809 extern void xhci_root_hub_intr_root_disable(xhci_t *);
783 810
784 811 /*
785 812 * Logging functions
786 813 */
787 814 extern void xhci_log(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
788 815 extern void xhci_error(xhci_t *xhcip, const char *fmt, ...) __KPRINTFLIKE(2);
789 816
790 817 /*
791 818 * Misc. data
792 819 */
793 820 extern void *xhci_soft_state;
794 821
795 822 #ifdef __cplusplus
796 823 }
797 824 #endif
798 825
799 826 #endif /* _SYS_USB_XHCI_XHCI_H */
|
↓ open down ↓ |
72 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX