/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * xHCI Endpoint Initialization and Management
 *
 * Please see the big theory statement in xhci.c for more information.
 */

#include <sys/usb/hcd/xhci/xhci.h>
#include <sys/sdt.h>

boolean_t
xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
{
        usba_pipe_handle_data_t *ph;

        ASSERT(xep != NULL);
        ph = xep->xep_pipe;
        ASSERT(ph != NULL);

        return ((xep->xep_type == USB_EP_ATTR_INTR ||
            xep->xep_type == USB_EP_ATTR_ISOCH) &&
            (ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN);
}

/*
 * Endpoints are a bit weirdly numbered. Endpoint zero is the default control
 * endpoint, so the direction doesn't matter. All of the others are arranged
 * as ep 1 out, ep 1 in, ep 2 out, ep 2 in, and so on. This is based on the
 * layout of the Device Context Structure in xHCI 1.1 / 6.2.1. Therefore, given
 * an endpoint number and direction, endpoint n starts at entry 2n - 1, e.g.
 * endpoint 1 starts at entry 1, endpoint 2 at entry 3, etc. The OUT direction
 * comes first, followed by the IN direction, so an IN endpoint is at entry 2n.
 * For example, endpoint 2 IN (bEndpointAddress 0x82) maps to entry 4.
 */
uint_t
xhci_endpoint_pipe_to_epid(usba_pipe_handle_data_t *ph)
{
        int ep;

        ep = ph->p_ep.bEndpointAddress & USB_EP_NUM_MASK;
        if (ep == 0)
                return (ep);
        ep = ep * 2 - 1;
        if ((ph->p_ep.bEndpointAddress & USB_EP_DIR_MASK) == USB_EP_DIR_IN)
                ep++;

        VERIFY(ep < XHCI_NUM_ENDPOINTS);
        return (ep);
}

/*
 * The assumption is that someone calling this owns this endpoint / device and
 * that it's in a state where it's safe to zero out that information.
 */
void
xhci_endpoint_fini(xhci_device_t *xd, int endpoint)
{
        xhci_endpoint_t *xep = xd->xd_endpoints[endpoint];

        VERIFY(xep != NULL);
        xd->xd_endpoints[endpoint] = NULL;

        xhci_ring_free(&xep->xep_ring);
        cv_destroy(&xep->xep_state_cv);
        list_destroy(&xep->xep_transfers);
        kmem_free(xep, sizeof (xhci_endpoint_t));
}

/*
 * Set up the default control endpoint input context. This needs to be done
 * before we address the device. Note, we separate out the default endpoint from
 * others, as we must set this up before we have a pipe handle.
 */
int
xhci_endpoint_setup_default_context(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
        uint_t mps;
        xhci_endpoint_context_t *ectx;
        uint64_t deq;

        ectx = xd->xd_endin[xep->xep_num];
        VERIFY(ectx != NULL);

        /*
         * We may or may not have a device descriptor. This should match the
         * same initial sizes that are done in hubd_create_child().
         *
         * Note, since we don't necessarily have an endpoint descriptor yet to
         * base this on we instead use the device's defaults if available. This
         * is different from normal endpoints for which there's always a
         * specific descriptor.
         */
        switch (xd->xd_usbdev->usb_port_status) {
        case USBA_LOW_SPEED_DEV:
                if (xd->xd_usbdev->usb_dev_descr != NULL) {
                        mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
                } else {
                        mps = 8;
                }
                break;
        case USBA_FULL_SPEED_DEV:
        case USBA_HIGH_SPEED_DEV:
                if (xd->xd_usbdev->usb_dev_descr != NULL) {
                        mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
                } else {
                        mps = 64;
                }
                break;
        case USBA_SUPER_SPEED_DEV:
        default:
                if (xd->xd_usbdev->usb_dev_descr != NULL) {
                        mps = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
                        mps = 1 << mps;
                } else {
                        mps = 512;
                }
                break;
        }

        bzero(ectx, sizeof (xhci_endpoint_context_t));
        ectx->xec_info = LE_32(0);
        ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(3) |
            XHCI_EPCTX_SET_EPTYPE(XHCI_EPCTX_TYPE_CTRL) |
            XHCI_EPCTX_SET_MAXB(0) | XHCI_EPCTX_SET_MPS(mps));
        deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
            xep->xep_ring.xr_tail;
        ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);
        ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) |
            XHCI_EPCTX_AVG_TRB_LEN(XHCI_CONTEXT_DEF_CTRL_ATL));

        XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
        if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
                xhci_error(xhcip, "failed to initialize default device input "
                    "context on slot %d and port %d for endpoint %u: "
                    "encountered fatal FM error synchronizing input context "
                    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
                xhci_fm_runtime_reset(xhcip);
                return (EIO);
        }

        return (0);
}

/*
 * Determine if we need to update the maximum packet size of the default
 * control endpoint. This may happen because we start with the default size
 * before we have a descriptor and then it may change; for example, full-speed
 * devices may have either an 8 or 64 byte maximum packet size.
 */
int
xhci_endpoint_update_default(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
        int mps, desc, info, ret;
        ASSERT(xd->xd_usbdev != NULL);

        mps = XHCI_EPCTX_GET_MPS(xd->xd_endout[xep->xep_num]->xec_info2);
        desc = xd->xd_usbdev->usb_dev_descr->bMaxPacketSize0;
        if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
                desc = 1 << desc;
        }

        if (mps == desc)
                return (USB_SUCCESS);

        /*
         * Update only the context for the default control endpoint.
         */
        mutex_enter(&xd->xd_imtx);
        info = LE_32(xd->xd_endout[xep->xep_num]->xec_info2);
        info &= ~XHCI_EPCTX_SET_MPS(mps);
        info |= XHCI_EPCTX_SET_MPS(desc);
        xd->xd_endin[xep->xep_num]->xec_info2 = LE_32(info);
        xd->xd_input->xic_drop_flags = LE_32(0);
        xd->xd_input->xic_add_flags = LE_32(XHCI_INCTX_MASK_DCI(1));

        ret = xhci_command_evaluate_context(xhcip, xd);
        mutex_exit(&xd->xd_imtx);

        return (ret);
}

static uint_t
xhci_endpoint_epdesc_to_type(usb_ep_descr_t *ep)
{
        int type = ep->bmAttributes & USB_EP_ATTR_MASK;
        boolean_t in = (ep->bEndpointAddress & USB_EP_DIR_MASK) ==
            USB_EP_DIR_IN;

        switch (type) {
        case USB_EP_ATTR_CONTROL:
                return (XHCI_EPCTX_TYPE_CTRL);
        case USB_EP_ATTR_ISOCH:
                if (in == B_TRUE)
                        return (XHCI_EPCTX_TYPE_ISOCH_IN);
                return (XHCI_EPCTX_TYPE_ISOCH_OUT);
        case USB_EP_ATTR_BULK:
                if (in == B_TRUE)
                        return (XHCI_EPCTX_TYPE_BULK_IN);
                return (XHCI_EPCTX_TYPE_BULK_OUT);
        case USB_EP_ATTR_INTR:
                if (in == B_TRUE)
                        return (XHCI_EPCTX_TYPE_INTR_IN);
                return (XHCI_EPCTX_TYPE_INTR_OUT);
        default:
                panic("bad USB attribute type: %d", type);
        }

        /* LINTED: E_FUNC_NO_RET_VAL */
}

static uint_t
xhci_endpoint_determine_burst(xhci_device_t *xd, xhci_endpoint_t *xep)
{
        switch (xd->xd_usbdev->usb_port_status) {
        case USBA_LOW_SPEED_DEV:
        case USBA_FULL_SPEED_DEV:
                /*
                 * Per xHCI 1.1 / 6.2.3.4, burst is always zero for these
                 * devices.
                 */
                return (0);
        case USBA_HIGH_SPEED_DEV:
                if (xep->xep_type == USB_EP_ATTR_CONTROL ||
                    xep->xep_type == USB_EP_ATTR_BULK)
                        return (0);
                return ((xep->xep_pipe->p_xep.uex_ep.wMaxPacketSize &
                    XHCI_CONTEXT_BURST_MASK) >> XHCI_CONTEXT_BURST_SHIFT);
        default:
                /*
                 * For USB >= 3.0 devices, this comes from the companion
                 * descriptor.
                 */
                ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
                return (xep->xep_pipe->p_xep.uex_ep_ss.bMaxBurst);
        }
}

/*
 * Convert a linear mapping of values that are in the range of 1-255 into a
 * 2^x value. Because we're supposed to round down for these calculations (see
 * the note in xHCI 1.1 / 6.2.3.6), we can do this simply with a fls() and
 * subtracting one.
 */
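/*
 * For example, a bInterval of 10 has its most significant set bit at position
 * 4 (ddi_fls(10) == 4), so we return 3, rounding 10 down to 2^3 = 8 units.
 * The caller converts that exponent into the final xHCI interval value, e.g.
 * by adding 3 to go from frames to microframes.
 */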
static uint_t
xhci_endpoint_linear_interval(usb_ep_descr_t *ep)
{
        int exp;
        int ival = ep->bInterval;
        if (ival < 1)
                ival = 1;
        if (ival > 255)
                ival = 255;
        exp = ddi_fls(ival) - 1;
        ASSERT(exp >= 0 && exp <= 7);
        return (exp);
}

/*
 * Convert the set of values that use a 2^(x-1) value for interval into a 2^x
 * range. Note the valid input range is 1-16, so we clamp values based on this.
 * See xHCI 1.1 / 6.2.3.6 for more information.
 */
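/*
 * For example, a high-speed interrupt endpoint with a bInterval of 4 has a
 * period of 2^(4-1) = 8 microframes, so we return 3, which the controller
 * interprets as 2^3 * 125us = 1ms.
 */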
static uint_t
xhci_endpoint_exponential_interval(usb_ep_descr_t *ep)
{
        int ival;

        ival = ep->bInterval;
        if (ival < 1)
                ival = 1;
        if (ival > 16)
                ival = 16;
        ival--;
        ASSERT(ival >= 0 && ival <= 15);
        return (ival);
}


/*
 * Determining the interval is unfortunately somewhat complicated as there are
 * many different forms that things can take. This is all summarized in a
 * somewhat helpful table, number 65, in xHCI 1.1 / 6.2.3.6. Here are the six
 * different cases we have to consider:
 *
 * Case 1: Non-High Speed Bulk and Control Endpoints
 *        Always return 0.
 *
 * Case 2: Super Speed and High Speed Isoch and Intr endpoints
 *        Convert from a 2^(x-1) range to a 2^x range.
 *
 * Case 3: Full Speed Isochronous Endpoints
 *        As case 2, but add 3 as its values are in frames and we need to
 *        convert to microframes. Adding three to the result is the same as
 *        multiplying the initial value by 8.
 *
 * Case 4: Full Speed and Low Speed Interrupt Endpoints
 *        These have a 1-255 ms range that we need to convert to a 2^x * 125 us
 *        range. We use the linear conversion and then add 3 to account for the
 *        multiplying by 8 conversion from frames to microframes.
 *
 * Case 5: High Speed Control and Bulk Endpoints
 *        These are a bit of a weird case. The spec and other implementations
 *        make it seem that it's similar to case 4, but without the fixed
 *        addition, as it's interpreted differently due to NAKs.
 *
 * Case 6: Low Speed Isochronous Endpoints
 *        These are not actually defined; however, like other implementations
 *        we treat them like case 4.
 */
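/*
 * As a worked example of case 4, a full-speed interrupt endpoint asking for a
 * 33 ms period is rounded down to 32 ms: the linear conversion yields 5
 * (2^5 = 32), and adding 3 for the frame-to-microframe conversion gives an
 * interval field of 8, i.e. 2^8 * 125us = 32ms.
 */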
static uint_t
xhci_endpoint_interval(xhci_device_t *xd, usb_ep_descr_t *ep)
{
        int type = ep->bmAttributes & USB_EP_ATTR_MASK;
        int speed = xd->xd_usbdev->usb_port_status;

        /*
         * Handle Cases 1 and 5 first.
         */
        if (type == USB_EP_ATTR_CONTROL || type == USB_EP_ATTR_BULK) {
                if (speed != USBA_HIGH_SPEED_DEV)
                        return (0);
                return (xhci_endpoint_linear_interval(ep));
        }

        /*
         * Handle Isoch and Intr cases next.
         */
        switch (speed) {
        case USBA_LOW_SPEED_DEV:
                /*
                 * Interrupt endpoints at low speed are the same as full speed,
                 * hence the fall through.
                 */
                if (type == USB_EP_ATTR_ISOCH) {
                        return (xhci_endpoint_exponential_interval(ep) + 3);
                }
                /* FALLTHROUGH */
        case USBA_FULL_SPEED_DEV:
                return (xhci_endpoint_linear_interval(ep) + 3);
        case USBA_HIGH_SPEED_DEV:
        case USBA_SUPER_SPEED_DEV:
        default:
                /*
                 * Case 2. Treat any newer and faster speeds as Super Speed by
                 * default as USB 3.1 is effectively treated the same here.
                 */
                return (xhci_endpoint_exponential_interval(ep));
        }
}

/*
 * The way to calculate the Maximum ESIT is described in xHCI 1.1 / 4.14.2.
 * First off, this only applies to Interrupt and Isochronous descriptors. For
 * Super Speed and newer things, it comes out of a descriptor. Otherwise we
 * calculate it by doing 'Max Packet Size' * ('Max Burst' + 1).
 */
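/*
 * For example, a high-speed interrupt endpoint with a 1024-byte max packet
 * size and a burst value of 2 (up to three packets per microframe) has a
 * Max ESIT payload of 1024 * (2 + 1) = 3072 bytes.
 */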
static uint_t
xhci_endpoint_max_esit(xhci_device_t *xd, xhci_endpoint_t *xep, uint_t mps,
    uint_t burst)
{
        if (xep->xep_type == USB_EP_ATTR_CONTROL ||
            xep->xep_type == USB_EP_ATTR_BULK) {
                return (0);
        }

        /*
         * Note that this will need to be updated for SuperSpeedPlus ISOC
         * devices to pull from the secondary companion descriptor they use.
         */
        if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
                usb_ep_xdescr_t *ep_xdesc = &xep->xep_pipe->p_xep;
                ASSERT(xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP);
                return (ep_xdesc->uex_ep_ss.wBytesPerInterval);
        }

        return (mps * (burst + 1));
}

/*
 * We've been asked to calculate and tell the xHCI controller an average TRB
 * data length. This is talked about in an implementation note in xHCI 1.1 /
 * 4.14.1.1. The reality is that it's hard to actually calculate this, as we're
 * supposed to take into account all of the TRBs that we use on that ring.
 *
 * Surveying other xHCI drivers, they all agree on using the default of 8 for
 * control endpoints; however, from there things get a little more fluid. For
 * interrupt and isochronous endpoints, many drivers use the minimum of the max
 * packet size and the device's page size. For bulk endpoints, some folks punt
 * and don't set anything, and others try to set it to the page size. The xHCI
 * implementation note suggests a 3k size here initially. For now, we'll just
 * guess for bulk endpoints and use our page size as a determining factor for
 * this, and use the BSD style for others. Note that Linux only sets this value
 * for control endpoints.
 */
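/*
 * For example, on a controller with the common 4 KiB page size, a bulk
 * endpoint gets an average TRB length of 4096, while an interrupt endpoint
 * with a 64-byte max packet size gets MIN(4096, 64) = 64.
 */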
static uint_t
xhci_endpoint_avg_trb(xhci_t *xhcip, usb_ep_descr_t *ep, int mps)
{
        int type = ep->bmAttributes & USB_EP_ATTR_MASK;

        switch (type) {
        case USB_EP_ATTR_ISOCH:
        case USB_EP_ATTR_INTR:
                return (MIN(xhcip->xhci_caps.xcap_pagesize, mps));
        case USB_EP_ATTR_CONTROL:
                return (XHCI_CONTEXT_DEF_CTRL_ATL);
        case USB_EP_ATTR_BULK:
                return (xhcip->xhci_caps.xcap_pagesize);
        default:
                panic("bad USB endpoint type: %d", type);
        }

        /* LINTED: E_FUNC_NO_RET_VAL */
}

int
xhci_endpoint_setup_context(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep)
{
        uint_t eptype, burst, ival, max_esit, avgtrb, mps, mult, cerr;
        xhci_endpoint_context_t *ectx;
        uint64_t deq;

        /*
         * For a USB >=3.0 device we should always have its companion descriptor
         * provided for us by USBA. If it's not here, complain loudly and fail.
         */
        if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV &&
            (xep->xep_pipe->p_xep.uex_flags & USB_EP_XFLAGS_SS_COMP) == 0) {
                const char *prod, *mfg;

                prod = xd->xd_usbdev->usb_product_str;
                if (prod == NULL)
                        prod = "Unknown Device";
                mfg = xd->xd_usbdev->usb_mfg_str;
                if (mfg == NULL)
                        mfg = "Unknown Manufacturer";

                xhci_log(xhcip, "Encountered USB >=3.0 device without endpoint "
                    "companion descriptor. Ensure driver %s is properly using "
                    "usb_pipe_xopen() for device %s %s",
                    ddi_driver_name(xd->xd_usbdev->usb_dip), prod, mfg);
                return (EINVAL);
        }

        ectx = xd->xd_endin[xep->xep_num];
        VERIFY(ectx != NULL);
        VERIFY(xd->xd_usbdev->usb_dev_descr != NULL);
        VERIFY(xep->xep_pipe != NULL);

        mps = xep->xep_pipe->p_ep.wMaxPacketSize & XHCI_CONTEXT_MPS_MASK;
        mult = XHCI_CONTEXT_DEF_MULT;
        cerr = XHCI_CONTEXT_DEF_CERR;

        switch (xep->xep_type) {
        case USB_EP_ATTR_ISOCH:
                /*
                 * When we have support for USB 3.1 SuperSpeedPlus devices,
                 * we'll need to make sure that we also check for its secondary
                 * endpoint companion descriptor here.
                 */
                /*
                 * For Super Speed devices, the isochronous Mult value comes
                 * from the bmAttributes of the SuperSpeed endpoint companion
                 * descriptor, which we verified is present above.
                 */
                if (xd->xd_usbdev->usb_port_status >= USBA_SUPER_SPEED_DEV) {
                        ASSERT(xep->xep_pipe->p_xep.uex_flags &
                            USB_EP_XFLAGS_SS_COMP);
                        mult = xep->xep_pipe->p_xep.uex_ep_ss.bmAttributes &
                            USB_EP_SS_COMP_ISOC_MULT_MASK;
                }

                mps &= XHCI_CONTEXT_MPS_MASK;
                cerr = XHCI_CONTEXT_ISOCH_CERR;
                break;
        default:
                /*
                 * No explicit changes needed for CONTROL, INTR, and BULK
                 * endpoints. They've been handled already and don't have any
                 * differences.
                 */
                break;
        }

        eptype = xhci_endpoint_epdesc_to_type(&xep->xep_pipe->p_xep.uex_ep);
        burst = xhci_endpoint_determine_burst(xd, xep);
        ival = xhci_endpoint_interval(xd, &xep->xep_pipe->p_xep.uex_ep);
        max_esit = xhci_endpoint_max_esit(xd, xep, mps, burst);
        avgtrb = xhci_endpoint_avg_trb(xhcip, &xep->xep_pipe->p_xep.uex_ep,
            mps);

        /*
         * The multi field may be reserved as zero if the LEC feature flag is
         * set. See the description of mult in xHCI 1.1 / 6.2.3.
         */
        if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
                mult = 0;

        bzero(ectx, sizeof (xhci_endpoint_context_t));

        ectx->xec_info = LE_32(XHCI_EPCTX_SET_MULT(mult) |
            XHCI_EPCTX_SET_IVAL(ival));
        if (xhcip->xhci_caps.xcap_flags2 & XCAP2_LEC)
                ectx->xec_info |= LE_32(XHCI_EPCTX_SET_MAX_ESIT_HI(max_esit));

        ectx->xec_info2 = LE_32(XHCI_EPCTX_SET_CERR(cerr) |
            XHCI_EPCTX_SET_EPTYPE(eptype) | XHCI_EPCTX_SET_MAXB(burst) |
            XHCI_EPCTX_SET_MPS(mps));

        deq = xhci_dma_pa(&xep->xep_ring.xr_dma) + sizeof (xhci_trb_t) *
            xep->xep_ring.xr_tail;
        ectx->xec_dequeue = LE_64(deq | xep->xep_ring.xr_cycle);

        ectx->xec_txinfo = LE_32(XHCI_EPCTX_MAX_ESIT_PAYLOAD(max_esit) |
            XHCI_EPCTX_AVG_TRB_LEN(avgtrb));

        XHCI_DMA_SYNC(xd->xd_ictx, DDI_DMA_SYNC_FORDEV);
        if (xhci_check_dma_handle(xhcip, &xd->xd_ictx) != DDI_FM_OK) {
                xhci_error(xhcip, "failed to initialize device input "
                    "context on slot %d and port %d for endpoint %u: "
                    "encountered fatal FM error synchronizing input context "
                    "DMA memory", xd->xd_slot, xd->xd_port, xep->xep_num);
                xhci_fm_runtime_reset(xhcip);
                return (EIO);
        }

        return (0);
}

/*
 * Initialize the endpoint and its input context for a given device. This is
 * called from two different contexts:
 *
 * 1. Initializing a device
 * 2. Opening a USB pipe
 *
 * In the second case, we need to worry about locking around the device. We
 * don't need to worry about the locking in the first case because the USBA
 * doesn't know about it yet.
 */
int
xhci_endpoint_init(xhci_t *xhcip, xhci_device_t *xd,
    usba_pipe_handle_data_t *ph)
{
        int ret;
        uint_t epid;
        xhci_endpoint_t *xep;

        if (ph == NULL) {
                epid = XHCI_DEFAULT_ENDPOINT;
        } else {
                ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
                epid = xhci_endpoint_pipe_to_epid(ph);
        }
        VERIFY(xd->xd_endpoints[epid] == NULL);

        xep = kmem_zalloc(sizeof (xhci_endpoint_t), KM_SLEEP);
        list_create(&xep->xep_transfers, sizeof (xhci_transfer_t),
            offsetof(xhci_transfer_t, xt_link));
        cv_init(&xep->xep_state_cv, NULL, CV_DRIVER, NULL);
        xep->xep_xd = xd;
        xep->xep_xhci = xhcip;
        xep->xep_num = epid;
        if (ph == NULL) {
                xep->xep_pipe = NULL;
                xep->xep_type = USB_EP_ATTR_CONTROL;
        } else {
                xep->xep_pipe = ph;
                xep->xep_type = ph->p_ep.bmAttributes & USB_EP_ATTR_MASK;
        }

        if ((ret = xhci_ring_alloc(xhcip, &xep->xep_ring)) != 0) {
                cv_destroy(&xep->xep_state_cv);
                list_destroy(&xep->xep_transfers);
                kmem_free(xep, sizeof (xhci_endpoint_t));
                return (ret);
        }

        if ((ret = xhci_ring_reset(xhcip, &xep->xep_ring)) != 0) {
                xhci_ring_free(&xep->xep_ring);
                cv_destroy(&xep->xep_state_cv);
                list_destroy(&xep->xep_transfers);
                kmem_free(xep, sizeof (xhci_endpoint_t));
                return (ret);
        }

        xd->xd_endpoints[epid] = xep;
        if (ph == NULL) {
                ret = xhci_endpoint_setup_default_context(xhcip, xd, xep);
        } else {
                ret = xhci_endpoint_setup_context(xhcip, xd, xep);
        }
        if (ret != 0) {
                xhci_endpoint_fini(xd, xep->xep_num);
                return (ret);
        }

        return (0);
}

/*
 * Attempt to quiesce an endpoint. Depending on the state of the endpoint, we
 * may need to simply stop it. Alternatively, we may need to explicitly reset
 * the endpoint. Once done, this endpoint should be stopped and can be
 * manipulated.
 */
int
xhci_endpoint_quiesce(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
{
        int ret = USB_SUCCESS;
        xhci_endpoint_context_t *epctx = xd->xd_endout[xep->xep_num];

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
        ASSERT(xep->xep_state & XHCI_ENDPOINT_QUIESCE);

        /*
         * First attempt to stop the endpoint, unless it's halted. We don't
         * really care what state it is in. Note that because other activity
         * could be going on, the state may change on us; however, if it's
         * running, it will always transition to a stopped state and none of the
         * other valid states will allow transitions without us taking an active
         * action.
         */
        if (!(xep->xep_state & XHCI_ENDPOINT_HALTED)) {
                mutex_exit(&xhcip->xhci_lock);
                ret = xhci_command_stop_endpoint(xhcip, xd, xep);
                mutex_enter(&xhcip->xhci_lock);

                if (ret == USB_INVALID_CONTEXT) {
                        XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);
                }
        }

        /*
         * Now, if we had the HALTED flag set, or we failed to stop it due to a
         * context error and we're in the HALTED state now, reset the endpoint.
         */
        if ((xep->xep_state & XHCI_ENDPOINT_HALTED) ||
            (ret == USB_INVALID_CONTEXT &&
            XHCI_EPCTX_STATE(LE_32(epctx->xec_info)) == XHCI_EP_HALTED)) {
                mutex_exit(&xhcip->xhci_lock);
                ret = xhci_command_reset_endpoint(xhcip, xd, xep);
                mutex_enter(&xhcip->xhci_lock);
        }

        /*
         * Ideally, one of the two commands should have worked; however, we
         * could have had a context error due to being in the wrong state.
         * Verify that we're either in the ERROR or STOPPED state and treat both
         * as success. All callers are assumed to be doing this so they can
         * change the dequeue pointer.
         */
        if (ret != USB_SUCCESS && ret != USB_INVALID_CONTEXT) {
                return (ret);
        }

        if (ret == USB_INVALID_CONTEXT) {
                XHCI_DMA_SYNC(xd->xd_octx, DDI_DMA_SYNC_FORKERNEL);

                switch (XHCI_EPCTX_STATE(LE_32(epctx->xec_info))) {
                case XHCI_EP_STOPPED:
                case XHCI_EP_ERROR:
                        /*
                         * This is where we wanted to go, so let's just take it.
                         */
                        ret = USB_SUCCESS;
                        break;
                case XHCI_EP_DISABLED:
                case XHCI_EP_RUNNING:
                case XHCI_EP_HALTED:
                default:
                        /*
                         * If we're in any of these, something really weird has
                         * happened and it's not worth trying to recover at this
                         * point.
                         */
                        xhci_error(xhcip, "!asked to stop endpoint %u on slot "
                            "%d and port %d: ended up in unexpected state %d",
                            xep->xep_num, xd->xd_slot, xd->xd_port,
                            XHCI_EPCTX_STATE(LE_32(epctx->xec_info)));
                        return (ret);
                }
        }

        /*
         * Now that we're successful, we can clear any possible halted state
         * tracking that we might have had.
         */
        if (ret == USB_SUCCESS) {
                xep->xep_state &= ~XHCI_ENDPOINT_HALTED;
        }

        return (ret);
}

int
xhci_endpoint_ring(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep)
{
        /*
         * The doorbell IDs are offset by one from the endpoint numbers that we
         * keep; for example, the default control endpoint (epid 0) is rung via
         * doorbell target 1.
         */
        xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(xd->xd_slot),
            xep->xep_num + 1);
        if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
                xhci_error(xhcip, "failed to ring doorbell for slot %d and "
                    "endpoint %u: encountered fatal FM register access error",
                    xd->xd_slot, xep->xep_num);
                xhci_fm_runtime_reset(xhcip);
                return (USB_HC_HARDWARE_ERROR);
        }

        DTRACE_PROBE3(xhci__doorbell__ring, xhci_t *, xhcip, uint32_t,
            XHCI_DOORBELL(xd->xd_slot), uint32_t, xep->xep_num + 1);

        return (USB_SUCCESS);
}

static void
xhci_endpoint_tick(void *arg)
{
        int ret;
        xhci_transfer_t *xt;
        xhci_endpoint_t *xep = arg;
        xhci_device_t *xd = xep->xep_xd;
        xhci_t *xhcip = xep->xep_xhci;

        mutex_enter(&xhcip->xhci_lock);

        /*
         * If we have the teardown flag set, then this is going away, don't try
         * to do anything. Also, if somehow a periodic endpoint has something
         * scheduled, just quit now and don't bother.
         */
        if (xep->xep_state & (XHCI_ENDPOINT_TEARDOWN |
            XHCI_ENDPOINT_PERIODIC)) {
                xep->xep_timeout = 0;
                mutex_exit(&xhcip->xhci_lock);
                return;
        }

        /*
         * If something else has already kicked off something potentially
         * dangerous to interfere with, don't bother waiting for it; just
         * reschedule ourselves.
         */
        if (xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) {
                xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
                    drv_usectohz(XHCI_TICK_TIMEOUT_US));
                mutex_exit(&xhcip->xhci_lock);
                return;
        }

        /*
         * At this point, we have an endpoint that we need to consider. See if
         * there are any transfers on it, if none, we're done. If so, check if
         * we have exceeded the timeout. If we have, then we have some work to
         * do.
         */
        xt = list_head(&xep->xep_transfers);
        if (xt == NULL) {
                xep->xep_timeout = 0;
                mutex_exit(&xhcip->xhci_lock);
                return;
        }

        if (xt->xt_timeout > 0) {
                xt->xt_timeout--;
                xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
                    drv_usectohz(XHCI_TICK_TIMEOUT_US));
                mutex_exit(&xhcip->xhci_lock);
                return;
        }

        /*
         * This item has timed out. We need to stop the ring and take action.
         */
        xep->xep_state |= XHCI_ENDPOINT_TIMED_OUT | XHCI_ENDPOINT_QUIESCE;
        ret = xhci_endpoint_quiesce(xhcip, xd, xep);
        if (ret != USB_SUCCESS) {
                /*
                 * If we fail to quiesce during the timeout, then remove the
                 * state flags and hopefully we'll be able to do so the next
                 * time around, or if a reset or polling stop comes in, maybe
                 * it can deal with it.
                 */
                xep->xep_state &= ~(XHCI_ENDPOINT_QUIESCE |
                    XHCI_ENDPOINT_TIMED_OUT);
                xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
                    drv_usectohz(XHCI_TICK_TIMEOUT_US));
                mutex_exit(&xhcip->xhci_lock);
                cv_broadcast(&xep->xep_state_cv);
                xhci_error(xhcip, "failed to successfully quiesce timed out "
                    "endpoint %u of device on slot %d and port %d: device "
                    "remains timed out", xep->xep_num, xd->xd_slot,
                    xd->xd_port);
                return;
        }

        xhci_ring_skip_transfer(&xep->xep_ring, xt);
        (void) list_remove_head(&xep->xep_transfers);
        mutex_exit(&xhcip->xhci_lock);

        /*
         * At this point, we try to set the ring's dequeue pointer. If this
         * fails, we're left in an awkward state. We've already adjusted the
         * ring and removed the transfer. All we can really do is return the
         * transfer and hope that the caller attempts to reset the ring, which
         * should succeed at this point. Based on everything we've done to set
         * things up, it'd be odd if this did fail.
         */
        ret = xhci_command_set_tr_dequeue(xhcip, xd, xep);
        mutex_enter(&xhcip->xhci_lock);
        xep->xep_state &= ~XHCI_ENDPOINT_QUIESCE;
        if (ret == USB_SUCCESS) {
                xep->xep_state &= ~XHCI_ENDPOINT_TIMED_OUT;
        } else {
                xhci_error(xhcip, "failed to successfully set transfer ring "
                    "dequeue pointer of timed out endpoint %u of "
                    "device on slot %d and port %d: device remains timed out, "
                    "please use cfgadm to recover", xep->xep_num, xd->xd_slot,
                    xd->xd_port);
        }
        xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
            drv_usectohz(XHCI_TICK_TIMEOUT_US));
        mutex_exit(&xhcip->xhci_lock);
        cv_broadcast(&xep->xep_state_cv);

        /*
         * Because we never time out periodic-related activity, we will always
         * have the request on the transfer.
         */
        ASSERT(xt->xt_usba_req != NULL);
        usba_hcdi_cb(xep->xep_pipe, xt->xt_usba_req, USB_CR_TIMEOUT);
        xhci_transfer_free(xhcip, xt);
}

/*
 * We've been asked to schedule a series of frames onto the specified endpoint.
 * We need to make sure that there is enough room, at which point we can queue
 * it and then ring the doorbell. Note that we queue the TRBs in reverse order,
 * writing the first TRB last, so that if the controller moves along the ring
 * while we're still filling it in, it won't see a valid cycle bit on the first
 * TRB and consume a partially written chain.
 */
int
xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
    xhci_transfer_t *xt, boolean_t ring)
{
        int i;
        xhci_ring_t *rp = &xep->xep_ring;

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
        ASSERT(xt->xt_ntrbs > 0);
        ASSERT(xt->xt_trbs != NULL);

        if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
                return (USB_FAILURE);

        if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
                return (USB_NO_RESOURCES);

        for (i = xt->xt_ntrbs - 1; i > 0; i--) {
                xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], &xt->xt_trbs_pa[i],
                    B_TRUE);
        }
        xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], &xt->xt_trbs_pa[0],
            B_FALSE);

        XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
        xhci_ring_trb_produce(rp, xt->xt_ntrbs);
        list_insert_tail(&xep->xep_transfers, xt);

        XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
        if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
                xhci_error(xhcip, "failed to write out TRB for device on slot "
                    "%d, port %d, and endpoint %u: encountered fatal FM error "
                    "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
                    xep->xep_num);
                xhci_fm_runtime_reset(xhcip);
                return (USB_HC_HARDWARE_ERROR);
        }

        if (xep->xep_timeout == 0 &&
            !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
                xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
                    drv_usectohz(XHCI_TICK_TIMEOUT_US));
        }

        xt->xt_sched_time = gethrtime();

        if (ring == B_FALSE)
                return (USB_SUCCESS);

        return (xhci_endpoint_ring(xhcip, xd, xep));
}

static xhci_transfer_t *
xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
    xhci_trb_t *trb, uint_t *offp)
{
        uint_t i;
        uint64_t addr;
        xhci_transfer_t *xt;

        ASSERT(xhcip != NULL);
        ASSERT(offp != NULL);
        ASSERT(xep != NULL);
        ASSERT(trb != NULL);
        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));

        if ((xt = list_head(&xep->xep_transfers)) == NULL)
                return (NULL);

        addr = LE_64(trb->trb_addr);

        /*
         * Check if this is the simple case of an event data TRB. If it is,
         * then all we need to do is check that its data matches the address of
         * the transfer.
         */
        if (XHCI_TRB_GET_ED(LE_32(trb->trb_flags)) != 0) {
                if (LE_64(trb->trb_addr) != (uintptr_t)xt)
                        return (NULL);

                *offp = xt->xt_ntrbs - 1;
                return (xt);
        }

        /*
         * This represents an error that has occurred. We need to check two
         * different things. The first is that the TRB PA maps to one of the
         * TRBs in the transfer. Secondly, we need to make sure that it makes
         * sense in the context of the ring and our notion of where the tail is.
         */
        for (i = 0; i < xt->xt_ntrbs; i++) {
                if (xt->xt_trbs_pa[i] == addr)
                        break;
        }

        if (i == xt->xt_ntrbs)
                return (NULL);

        if (xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
            xt->xt_ntrbs) == -1)
                return (NULL);

        *offp = i;
        return (xt);
}

static void
xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt)
{
        int ret;
        xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
        xhci_periodic_pipe_t *xpp = &xp->xp_periodic;

        ASSERT3U(xpp->xpp_tsize, >, 0);

        xt->xt_short = 0;
        xt->xt_cr = USB_CR_OK;

        mutex_enter(&xhcip->xhci_lock);

        /*
         * If we don't have an active poll, then we shouldn't bother trying to
         * reschedule it. This means that we're trying to stop or we ran out of
         * memory.
         */
        if (xpp->xpp_poll_state != XHCI_PERIODIC_POLL_ACTIVE) {
                mutex_exit(&xhcip->xhci_lock);
                return;
        }

        if (xep->xep_type == USB_EP_ATTR_ISOCH) {
                int i;
                for (i = 0; i < xt->xt_ntrbs; i++) {
                        xt->xt_isoc[i].isoc_pkt_actual_length =
                            xt->xt_isoc[i].isoc_pkt_length;
                        xt->xt_isoc[i].isoc_pkt_status = USB_CR_OK;
                }
        }

        /*
         * In general, there should always be space on the ring for this. The
         * only reason that rescheduling an existing transfer for a periodic
         * endpoint wouldn't work is because we have a hardware error, at which
         * point we're going to be going down hard anyway. We log here to make
         * this case discoverable in case our assumptions are wrong.
         */
        ret = xhci_endpoint_schedule(xhcip, xd, xep, xt, B_TRUE);
        if (ret != 0) {
                xhci_log(xhcip, "!failed to reschedule periodic endpoint %u "
                    "(type %u) on slot %d: %d\n", xep->xep_num, xep->xep_type,
                    xd->xd_slot, ret);
        }
        mutex_exit(&xhcip->xhci_lock);
}

/*
 * We're dealing with a message on a control endpoint. This may be a default
 * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
 * a setup stage, data stage (which may have one or more other TRBs) and then a
 * final status stage.
 *
 * We generally set ourselves up such that we get interrupted and notified only
 * on the status stage and for short transfers in the data stage. If we
 * encounter a short transfer in the data stage, then we need to go through and
 * check whether or not the short transfer is allowed. If it is, then there's
 * nothing to do. We'll update everything and call back the framework once we
 * get the status stage.
 */
static boolean_t
xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
{
        int code;
        usb_ctrl_req_t *ucrp;
        xhci_transfer_t *rem;

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));

        code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
        ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;

        /*
         * Now that we know what this TRB is for, we need to determine whether
         * it was for a data/normal stage or the status stage. We cheat by
         * looking at the last entry. If it's a data stage, then we must have
         * gotten a short write. We record this fact and whether we should
         * consider the transfer fatal for the subsequent status stage.
         */
        if (off != xt->xt_ntrbs - 1) {
                uint_t remain;
                usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;

                /*
                 * This is a data stage TRB. The only reason we should have
                 * gotten something for this is because it was short. Make sure
                 * it's okay before we continue.
                 */
                VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
                if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
                        xt->xt_cr = USB_CR_DATA_UNDERRUN;
                        mutex_exit(&xhcip->xhci_lock);
                        return (B_TRUE);
                }

                /*
                 * The value in the resulting trb is how much data remained to
                 * be transferred. Normalize that against the original buffer
                 * size.
                 */
                remain = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
                xt->xt_short = xt->xt_buffer.xdb_len - remain;
                mutex_exit(&xhcip->xhci_lock);
                return (B_TRUE);
        }

        /*
         * Okay, this is a status stage trb that's in good health. We should
         * finally go ahead, sync data, and do the callback. If we have short
         * data, then xt->xt_short will be non-zero.
         */
        if (xt->xt_data_tohost == B_TRUE) {
                size_t len;
                if (xt->xt_short != 0) {
                        len = xt->xt_short;
                } else {
                        len = xt->xt_buffer.xdb_len;
                }

                if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
                    DDI_FM_OK) {
                        xhci_error(xhcip, "failed to process control transfer "
                            "callback for endpoint %u of device on slot %d and "
                            "port %d: encountered fatal FM error synchronizing "
                            "DMA memory, resetting device", xep->xep_num,
                            xd->xd_slot, xd->xd_port);
                        xhci_fm_runtime_reset(xhcip);
                        mutex_exit(&xhcip->xhci_lock);
                        return (B_FALSE);
                }

                xhci_transfer_copy(xt, ucrp->ctrl_data->b_rptr, len, B_TRUE);
                ucrp->ctrl_data->b_wptr += len;
        }

        /*
         * Now we're done. We can go ahead and bump the ring. Free the transfer
         * outside of the lock and call back into the framework.
         */
        VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
        rem = list_remove_head(&xep->xep_transfers);
        VERIFY3P(rem, ==, xt);
        mutex_exit(&xhcip->xhci_lock);

        usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)ucrp, xt->xt_cr);
        xhci_transfer_free(xhcip, xt);

        return (B_TRUE);
}

/*
 * Cons up a new usb request for the periodic data transfer if we can. If there
 * isn't one available, change the return code to NO_RESOURCES and stop polling
 * on this endpoint, thus using and consuming the original request.
 */
static usb_opaque_t
xhci_endpoint_dup_periodic(xhci_endpoint_t *xep, xhci_transfer_t *xt,
    usb_cr_t *cr)
{
        usb_opaque_t urp;

        xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
        xhci_periodic_pipe_t *xpp = &xp->xp_periodic;

        /*
         * In general, transfers shouldn't have a usb request. However, oneshot
         * Interrupt IN ones will, so we use this as a way to shortcut out of
         * here.
         */
        if (xt->xt_usba_req != NULL)
                return (xt->xt_usba_req);

        if (xep->xep_type == USB_EP_ATTR_INTR) {
                urp = (usb_opaque_t)usba_hcdi_dup_intr_req(xep->xep_pipe->p_dip,
                    (usb_intr_req_t *)xpp->xpp_usb_req, xpp->xpp_tsize, 0);
        } else {
                urp = (usb_opaque_t)usba_hcdi_dup_isoc_req(xep->xep_pipe->p_dip,
                    (usb_isoc_req_t *)xpp->xpp_usb_req, 0);
        }
        if (urp == NULL) {
                xpp->xpp_poll_state = XHCI_PERIODIC_POLL_NOMEM;
                urp = xpp->xpp_usb_req;
                xpp->xpp_usb_req = NULL;
                *cr = USB_CR_NO_RESOURCES;
        } else {
                mutex_enter(&xep->xep_pipe->p_mutex);
                xep->xep_pipe->p_req_count++;
                mutex_exit(&xep->xep_pipe->p_mutex);
        }

        return (urp);
}

static xhci_device_t *
xhci_device_lookup_by_slot(xhci_t *xhcip, int slot)
{
        xhci_device_t *xd;

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));

        for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
            xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
                if (xd->xd_slot == slot)
                        return (xd);
        }

        return (NULL);
}

/*
 * Handle things which consist solely of normal transfers, in other words, bulk
 * and interrupt transfers.
 */
static boolean_t
xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
{
        int code;
        usb_cr_t cr;
        xhci_transfer_t *rem;
        int attrs;
        mblk_t *mp;
        boolean_t periodic = B_FALSE;
        usb_opaque_t urp;

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
        ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
            xep->xep_type == USB_EP_ATTR_INTR);

        code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));

        if (code == XHCI_CODE_SHORT_XFER) {
                uint_t residue;
                residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));

                if (xep->xep_type == USB_EP_ATTR_BULK) {
                        VERIFY3U(XHCI_TRB_GET_ED(LE_32(trb->trb_flags)), !=, 0);
                        xt->xt_short = residue;
                } else {
                        xt->xt_short = xt->xt_buffer.xdb_len - residue;
                }
        }

        /*
         * If we have an interrupt from something that's not the last entry,
         * that must mean we had a short transfer, so there's nothing more for
         * us to do at the moment. We won't call back until everything's
         * finished for the general transfer.
         */
        if (off < xt->xt_ntrbs - 1) {
                mutex_exit(&xhcip->xhci_lock);
                return (B_TRUE);
        }

        urp = xt->xt_usba_req;
        if (xep->xep_type == USB_EP_ATTR_BULK) {
                usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
                attrs = ubrp->bulk_attributes;
                mp = ubrp->bulk_data;
        } else {
                usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;

                if (uirp == NULL) {
                        periodic = B_TRUE;
                        urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
                        uirp = (usb_intr_req_t *)urp;

                        /*
                         * If we weren't able to duplicate the interrupt, then
                         * we can't put any data in it.
                         */
                        if (cr == USB_CR_NO_RESOURCES)
                                goto out;
                }

                attrs = uirp->intr_attributes;
                mp = uirp->intr_data;
        }

        if (xt->xt_data_tohost == B_TRUE) {
                size_t len;
                if (xt->xt_short != 0) {
                        if (!(attrs & USB_ATTRS_SHORT_XFER_OK)) {
                                cr = USB_CR_DATA_UNDERRUN;
                                goto out;
                        }
                        len = xt->xt_short;
                } else {
                        len = xt->xt_buffer.xdb_len;
                }

                if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
                    DDI_FM_OK) {
                        xhci_error(xhcip, "failed to process normal transfer "
                            "callback for endpoint %u of device on slot %d and "
                            "port %d: encountered fatal FM error synchronizing "
                            "DMA memory, resetting device", xep->xep_num,
                            xd->xd_slot, xd->xd_port);
                        xhci_fm_runtime_reset(xhcip);
                        mutex_exit(&xhcip->xhci_lock);
                        return (B_FALSE);
                }

                xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
                mp->b_wptr += len;
        }
        cr = USB_CR_OK;

out:
        /*
         * Don't use the address from the TRB here. When we're dealing with
         * event data that will be entirely wrong.
         */
        VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, xt->xt_trbs_pa[off]));
        rem = list_remove_head(&xep->xep_transfers);
        VERIFY3P(rem, ==, xt);
        mutex_exit(&xhcip->xhci_lock);

        usba_hcdi_cb(xep->xep_pipe, urp, cr);
        if (periodic == B_TRUE) {
                xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
        } else {
                xhci_transfer_free(xhcip, xt);
        }

        return (B_TRUE);
}

static boolean_t
xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
    xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
{
        int code;
        usb_cr_t cr;
        xhci_transfer_t *rem;
        usb_isoc_pkt_descr_t *desc;
        usb_isoc_req_t *usrp;

        ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
        ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);

        code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));

        /*
         * The descriptors that we copy the data from are set up to assume that
         * everything was OK and we transferred all the requested data.
         */
        desc = &xt->xt_isoc[off];
        if (code == XHCI_CODE_SHORT_XFER) {
                int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
                desc->isoc_pkt_actual_length -= residue;
        }

        /*
         * We don't perform the callback until the very last TRB is returned
         * here. If we have a TRB report on something else, that means that we
         * had a short transfer.
         */
        if (off < xt->xt_ntrbs - 1) {
                mutex_exit(&xhcip->xhci_lock);
                return (B_TRUE);
        }

        VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
        rem = list_remove_head(&xep->xep_transfers);
        VERIFY3P(rem, ==, xt);
        mutex_exit(&xhcip->xhci_lock);

        cr = USB_CR_OK;

        if (xt->xt_data_tohost == B_TRUE) {
                usb_opaque_t urp;
                urp = xhci_endpoint_dup_periodic(xep, xt, &cr);
                usrp = (usb_isoc_req_t *)urp;

                if (cr == USB_CR_OK) {
                        mblk_t *mp;
                        size_t len;
                        if (xhci_transfer_sync(xhcip, xt,
                            DDI_DMA_SYNC_FORCPU) != DDI_FM_OK) {
                                xhci_error(xhcip, "failed to process "
                                    "isochronous transfer callback for "
                                    "endpoint %u of device on slot %d and port "
                                    "%d: encountered fatal FM error "
                                    "synchronizing DMA memory, resetting "
                                    "device",
                                    xep->xep_num, xd->xd_slot, xd->xd_port);
                                xhci_fm_runtime_reset(xhcip);
                                mutex_exit(&xhcip->xhci_lock);
                                return (B_FALSE);
                        }

                        mp = usrp->isoc_data;
                        len = xt->xt_buffer.xdb_len;
                        xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
                        mp->b_wptr += len;
                }
        } else {
                usrp = (usb_isoc_req_t *)xt->xt_usba_req;
        }

        if (cr == USB_CR_OK) {
                bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
                    sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
        }

        usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
        if (xt->xt_data_tohost == B_TRUE) {
                xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
        } else {
                xhci_transfer_free(xhcip, xt);
        }

        return (B_TRUE);
}

boolean_t
xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
{
        boolean_t ret;
        int slot, endpoint, code;
        uint_t off;
        xhci_device_t *xd;
        xhci_endpoint_t *xep;
        xhci_transfer_t *xt;
        boolean_t transfer_done;

        endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
        slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
        code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));

        switch (code) {
        case XHCI_CODE_RING_UNDERRUN:
        case XHCI_CODE_RING_OVERRUN:
                /*
                 * If we have an ISOC overrun or underrun then there will be no
                 * valid data pointer in the TRB associated with it. Just drive
                 * on.
                 */
                return (B_TRUE);
        case XHCI_CODE_UNDEFINED:
                xhci_error(xhcip, "received transfer trb with undefined fatal "
                    "error: resetting device");
                xhci_fm_runtime_reset(xhcip);
                return (B_FALSE);
        case XHCI_CODE_XFER_STOPPED:
        case XHCI_CODE_XFER_STOPINV:
        case XHCI_CODE_XFER_STOPSHORT:
                /*
                 * This causes us to transition the endpoint to a stopped state.
                 * Each of these indicates a different possible state that we
                 * have to deal with. Effectively we're going to drop it and
                 * leave it up to the consumers to figure out what to do. For
                 * the moment, that's generally okay because stops are only used
                 * in cases where we're cleaning up outstanding reqs, etc.
                 *
                 * We do this before we check for the corresponding transfer, as
                 * it will generally be generated by a command issued to stop
                 * the ring.
                 */
                return (B_TRUE);
        default:
                break;
        }

        mutex_enter(&xhcip->xhci_lock);
        xd = xhci_device_lookup_by_slot(xhcip, slot);
        if (xd == NULL) {
                xhci_error(xhcip, "received transfer trb with code %d for "
                    "unknown slot %d and endpoint %d: resetting device", code,
                    slot, endpoint);
                mutex_exit(&xhcip->xhci_lock);
                xhci_fm_runtime_reset(xhcip);
                return (B_FALSE);
        }

        /*
         * Endpoint IDs are indexed based on their Device Context Index, which
         * means that we need to subtract one to get the actual ID that we use.
         */
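        /*
         * For example, the default control endpoint has Device Context Index
         * 1, which corresponds to xd_endpoints[0] (XHCI_DEFAULT_ENDPOINT).
         */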
        xep = xd->xd_endpoints[endpoint - 1];
        if (xep == NULL) {
                xhci_error(xhcip, "received transfer trb with code %d, slot "
                    "%d, and unknown endpoint %d: resetting device", code,
                    slot, endpoint);
                mutex_exit(&xhcip->xhci_lock);
                xhci_fm_runtime_reset(xhcip);
                return (B_FALSE);
        }

        /*
         * The TRB that we received may be an event data TRB for a bulk
         * endpoint, a normal or short completion for any other endpoint, or an
         * error. In all cases, we need to figure out what transfer this
         * corresponds to. If this is an error, then we need to make sure that
         * the generating ring has been cleaned up.
         *
         * TRBs should be delivered in order, based on the ring. If for some
         * reason we find something that doesn't add up here, then we need to
         * assume that something has gone horribly wrong in the system and issue
         * a runtime reset. We issue the runtime reset rather than just trying
         * to stop and flush the ring, because it's unclear if we could stop
         * the ring in time.
         */
        if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
            NULL) {
                xhci_error(xhcip, "received transfer trb with code %d, slot "
                    "%d, and endpoint %d, but does not match current transfer "
                    "for endpoint: resetting device", code, slot, endpoint);
                mutex_exit(&xhcip->xhci_lock);
                xhci_fm_runtime_reset(xhcip);
                return (B_FALSE);
        }

        transfer_done = B_FALSE;

        switch (code) {
        case XHCI_CODE_SUCCESS:
        case XHCI_CODE_SHORT_XFER:
                /* Handled by endpoint logic */
                break;
        case XHCI_CODE_STALL:
                /*
                 * This causes us to transition to the halted state;
                 * however, downstream clients are able to handle this just
                 * fine.
                 */
                xep->xep_state |= XHCI_ENDPOINT_HALTED;
                xt->xt_cr = USB_CR_STALL;
                transfer_done = B_TRUE;
                break;
        case XHCI_CODE_BABBLE:
                transfer_done = B_TRUE;
                xt->xt_cr = USB_CR_DATA_OVERRUN;
                xep->xep_state |= XHCI_ENDPOINT_HALTED;
                break;
        case XHCI_CODE_TXERR:
        case XHCI_CODE_SPLITERR:
                transfer_done = B_TRUE;
                xt->xt_cr = USB_CR_DEV_NOT_RESP;
                xep->xep_state |= XHCI_ENDPOINT_HALTED;
                break;
        case XHCI_CODE_BW_OVERRUN:
                transfer_done = B_TRUE;
                xt->xt_cr = USB_CR_DATA_OVERRUN;
                break;
        case XHCI_CODE_DATA_BUF:
                transfer_done = B_TRUE;
                if (xt->xt_data_tohost)
                        xt->xt_cr = USB_CR_DATA_OVERRUN;
                else
                        xt->xt_cr = USB_CR_DATA_UNDERRUN;
                break;
        default:
                /*
                 * Treat these as general unspecified errors that don't cause a
                 * stop of the ring. Even if it does, a subsequent timeout
                 * should occur which causes us to end up dropping a pipe reset
                 * or at least issuing a reset of the device as part of
                 * quiescing.
                 */
                transfer_done = B_TRUE;
                xt->xt_cr = USB_CR_HC_HARDWARE_ERR;
                break;
        }

        if (transfer_done == B_TRUE) {
                xhci_transfer_t *alt;

                alt = list_remove_head(&xep->xep_transfers);
                VERIFY3P(alt, ==, xt);
                mutex_exit(&xhcip->xhci_lock);
                if (xt->xt_usba_req == NULL) {
                        usb_opaque_t urp;

                        urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
                        usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
                } else {
                        usba_hcdi_cb(xep->xep_pipe,
                            (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
                        xhci_transfer_free(xhcip, xt);
                }
                return (B_TRUE);
        }

        /*
         * Process the transfer callback based on the type of endpoint. Each of
         * these callback functions will end up calling back into USBA via
         * usba_hcdi_cb() to return transfer information (whether successful or
         * not). Because we can't hold any locks across a call to that function,
         * all of these callbacks will drop the xhci_t`xhci_lock by the time
         * they return. This is why there's no mutex_exit() call before we
         * return.
         */
        switch (xep->xep_type) {
        case USB_EP_ATTR_CONTROL:
                ret = xhci_endpoint_control_callback(xhcip, xd, xep, xt, off,
                    trb);
                break;
        case USB_EP_ATTR_BULK:
                ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off, trb);
                break;
        case USB_EP_ATTR_INTR:
                ret = xhci_endpoint_norm_callback(xhcip, xd, xep, xt, off,
                    trb);
                break;
        case USB_EP_ATTR_ISOCH:
                ret = xhci_endpoint_isoch_callback(xhcip, xd, xep, xt, off,
                    trb);
                break;
        default:
                panic("bad endpoint type: %u", xep->xep_type);
        }

        return (ret);
}