1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2016 Joyent, Inc.
14 */
15
16 /*
17 * -----------------------------
18 * xHCI Ring Management Routines
19 * -----------------------------
20 *
21 * There are three major different types of rings for xHCI, these are:
22 *
23 * 1) Command Rings
24 * 2) Event Rings
25 * 3) Transfer Rings
26 *
27 * Command and Transfer rings function in similar ways while the event rings are
28 * different. The difference comes in who is the consumer and who is the
29 * producer. In the case of command and transfer rings, the driver is the
30 * producer. For the event ring the driver is the consumer.
31 *
32 * Each ring in xhci has a synthetic head and tail register. Each entry in a
33 * ring has a bit that's often referred to as the 'Cycle bit'. The cycle bit is
271 head++;
272 if (head == xrp->xr_ntrb - 1) {
273 head = 0;
274 }
275
276 if (head == xrp->xr_tail)
277 return (B_FALSE);
278 }
279
280 return (B_TRUE);
281 }
282
283 /*
284 * Fill in a TRB in the ring at offset trboff. If cycle is currently set to
285 * B_TRUE, then we fill in the appropriate cycle bit to tell the system to
286 * advance, otherwise we leave the existing cycle bit untouched so the system
287 * doesn't accidentally advance until we have everything filled in.
288 */
289 void
290 xhci_ring_trb_fill(xhci_ring_t *xrp, uint_t trboff, xhci_trb_t *host_trb,
291 boolean_t put_cycle)
292 {
293 uint_t i;
294 uint32_t flags;
295 uint_t ent = xrp->xr_head;
296 uint8_t cycle = xrp->xr_cycle;
297 xhci_trb_t *trb;
298
299 for (i = 0; i < trboff; i++) {
300 ent++;
301 if (ent == xrp->xr_ntrb - 1) {
302 ent = 0;
303 cycle ^= 1;
304 }
305 }
306
307 /*
308 * If we're being asked to not update the cycle for it to be valid to be
309 * produced, we need to xor this once again to get to the inappropriate
310 * value.
311 */
312 if (put_cycle == B_FALSE)
313 cycle ^= 1;
314
315 trb = &xrp->xr_trb[ent];
316
317 trb->trb_addr = host_trb->trb_addr;
318 trb->trb_status = host_trb->trb_status;
319 flags = host_trb->trb_flags;
320 if (cycle == 0) {
321 flags &= ~LE_32(XHCI_TRB_CYCLE);
322 } else {
323 flags |= LE_32(XHCI_TRB_CYCLE);
324 }
325
326 trb->trb_flags = flags;
327 }
328
329 /*
330 * Update our metadata for the ring and verify the cycle bit is correctly set
331 * for the first trb. It is expected that it is incorrectly set.
332 */
/*
 * NOTE(review): original source lines 347-362 are missing from this
 * extraction; the lines below close out the link-TRB handling whose start
 * is not visible. Comments here describe only what is visible.
 */
333 void
334 xhci_ring_trb_produce(xhci_ring_t *xrp, uint_t ntrb)
335 {
336 uint_t i, ohead;
337 xhci_trb_t *trb;
338
339 VERIFY(ntrb > 0);
340
341 ohead = xrp->xr_head;
342
343 /*
344 * As part of updating the head, we need to make sure we correctly
345 * update the cycle bit of the link TRB. So we always do this first
346 * before we update the old head, to try and get a consistent view of
/* NOTE(review): gap -- original lines 347-362 not present in this copy. */
363 trb->trb_flags &= ~XHCI_TRB_CHAIN;
/*
 * NOTE(review): unlike the cycle-bit updates below, XHCI_TRB_CHAIN is
 * cleared here without an LE_32() wrapper; on a big-endian host that would
 * touch the wrong bit. Confirm against the flag definitions in the header.
 */
364
365 }
/* Presumably the link TRB (per the comment above): flip its cycle bit. */
366 trb->trb_flags ^= LE_32(XHCI_TRB_CYCLE);
367 xrp->xr_cycle ^= 1;	/* crossing the wrap point flips the producer cycle */
368 xrp->xr_head = 0;	/* head wraps back to the ring's start */
369 }
370 }
371
372 trb = &xrp->xr_trb[ohead];
/*
 * Flip the first TRB's cycle bit last: xhci_ring_trb_fill left it in the
 * not-yet-consumable state so the controller could not start on a
 * partially-produced group of TRBs.
 */
373 trb->trb_flags ^= LE_32(XHCI_TRB_CYCLE);
374 }
375
376 /*
377 * This is a convenience wrapper for the single TRB case to make callers less
378 * likely to mess up some of the required semantics.
379 */
380 void
381 xhci_ring_trb_put(xhci_ring_t *xrp, xhci_trb_t *trb)
382 {
383 xhci_ring_trb_fill(xrp, 0U, trb, B_FALSE);
384 xhci_ring_trb_produce(xrp, 1U);
385 }
386
387 /*
388 * Update the tail pointer for a ring based on the DMA address of a consumed
389 * entry. Note, this entry indicates what we just processed, therefore we should
390 * bump the tail entry to the next one.
391 */
392 boolean_t
393 xhci_ring_trb_consumed(xhci_ring_t *xrp, uint64_t dma)
394 {
395 uint64_t pa = xhci_dma_pa(&xrp->xr_dma);
396 uint64_t high = pa + xrp->xr_ntrb * sizeof (xhci_trb_t);
397
398 if (dma < pa || dma >= high ||
399 dma % sizeof (xhci_trb_t) != 0)
400 return (B_FALSE);
401
402 dma -= pa;
403 dma /= sizeof (xhci_trb_t);
|
1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2018, Joyent, Inc.
14 */
15
16 /*
17 * -----------------------------
18 * xHCI Ring Management Routines
19 * -----------------------------
20 *
21 * There are three major different types of rings for xHCI, these are:
22 *
23 * 1) Command Rings
24 * 2) Event Rings
25 * 3) Transfer Rings
26 *
27 * Command and Transfer rings function in similar ways while the event rings are
28 * different. The difference comes in who is the consumer and who is the
29 * producer. In the case of command and transfer rings, the driver is the
30 * producer. For the event ring the driver is the consumer.
31 *
32 * Each ring in xhci has a synthetic head and tail register. Each entry in a
33 * ring has a bit that's often referred to as the 'Cycle bit'. The cycle bit is
271 head++;
272 if (head == xrp->xr_ntrb - 1) {
273 head = 0;
274 }
275
276 if (head == xrp->xr_tail)
277 return (B_FALSE);
278 }
279
280 return (B_TRUE);
281 }
282
283 /*
284 * Fill in a TRB in the ring at offset trboff. If cycle is currently set to
285 * B_TRUE, then we fill in the appropriate cycle bit to tell the system to
286 * advance, otherwise we leave the existing cycle bit untouched so the system
287 * doesn't accidentally advance until we have everything filled in.
288 */
289 void
290 xhci_ring_trb_fill(xhci_ring_t *xrp, uint_t trboff, xhci_trb_t *host_trb,
291 uint64_t *trb_pap, boolean_t put_cycle)
292 {
293 uint_t i;
294 uint32_t flags;
295 uint_t ent = xrp->xr_head;
296 uint8_t cycle = xrp->xr_cycle;
297 xhci_trb_t *trb;
298
299 for (i = 0; i < trboff; i++) {
300 ent++;
301 if (ent == xrp->xr_ntrb - 1) {
302 ent = 0;
303 cycle ^= 1;
304 }
305 }
306
307 /*
308 * If we're being asked to not update the cycle for it to be valid to be
309 * produced, we need to xor this once again to get to the inappropriate
310 * value.
311 */
312 if (put_cycle == B_FALSE)
313 cycle ^= 1;
314
315 trb = &xrp->xr_trb[ent];
316
317 trb->trb_addr = host_trb->trb_addr;
318 trb->trb_status = host_trb->trb_status;
319 flags = host_trb->trb_flags;
320 if (cycle == 0) {
321 flags &= ~LE_32(XHCI_TRB_CYCLE);
322 } else {
323 flags |= LE_32(XHCI_TRB_CYCLE);
324 }
325
326 trb->trb_flags = flags;
327
328 if (trb_pap != NULL) {
329 uint64_t pa;
330
331 /*
332 * This logic only works if we have a single cookie address.
333 * However, this is prettty tightly assumed for rings through
334 * the xhci driver at this time.
335 */
336 ASSERT3U(xrp->xr_dma.xdb_ncookies, ==, 1);
337 pa = xrp->xr_dma.xdb_cookies[0].dmac_laddress;
338 pa += ((uintptr_t)trb - (uintptr_t)&xrp->xr_trb[0]);
339 *trb_pap = pa;
340 }
341 }
342
343 /*
344 * Update our metadata for the ring and verify the cycle bit is correctly set
345 * for the first trb. It is expected that it is incorrectly set.
346 */
/*
 * NOTE(review): original source lines 361-376 are missing from this
 * extraction; the lines below close out the link-TRB handling whose start
 * is not visible. Comments here describe only what is visible.
 */
347 void
348 xhci_ring_trb_produce(xhci_ring_t *xrp, uint_t ntrb)
349 {
350 uint_t i, ohead;
351 xhci_trb_t *trb;
352
353 VERIFY(ntrb > 0);
354
355 ohead = xrp->xr_head;
356
357 /*
358 * As part of updating the head, we need to make sure we correctly
359 * update the cycle bit of the link TRB. So we always do this first
360 * before we update the old head, to try and get a consistent view of
/* NOTE(review): gap -- original lines 361-376 not present in this copy. */
377 trb->trb_flags &= ~XHCI_TRB_CHAIN;
/*
 * NOTE(review): unlike the cycle-bit updates below, XHCI_TRB_CHAIN is
 * cleared here without an LE_32() wrapper; on a big-endian host that would
 * touch the wrong bit. Confirm against the flag definitions in the header.
 */
378
379 }
/* Presumably the link TRB (per the comment above): flip its cycle bit. */
380 trb->trb_flags ^= LE_32(XHCI_TRB_CYCLE);
381 xrp->xr_cycle ^= 1;	/* crossing the wrap point flips the producer cycle */
382 xrp->xr_head = 0;	/* head wraps back to the ring's start */
383 }
384 }
385
386 trb = &xrp->xr_trb[ohead];
/*
 * Flip the first TRB's cycle bit last: xhci_ring_trb_fill left it in the
 * not-yet-consumable state so the controller could not start on a
 * partially-produced group of TRBs.
 */
387 trb->trb_flags ^= LE_32(XHCI_TRB_CYCLE);
388 }
389
390 /*
391 * This is a convenience wrapper for the single TRB case to make callers less
392 * likely to mess up some of the required semantics.
393 */
394 void
395 xhci_ring_trb_put(xhci_ring_t *xrp, xhci_trb_t *trb)
396 {
397 xhci_ring_trb_fill(xrp, 0U, trb, NULL, B_FALSE);
398 xhci_ring_trb_produce(xrp, 1U);
399 }
400
401 /*
402 * Update the tail pointer for a ring based on the DMA address of a consumed
403 * entry. Note, this entry indicates what we just processed, therefore we should
404 * bump the tail entry to the next one.
405 */
406 boolean_t
407 xhci_ring_trb_consumed(xhci_ring_t *xrp, uint64_t dma)
408 {
409 uint64_t pa = xhci_dma_pa(&xrp->xr_dma);
410 uint64_t high = pa + xrp->xr_ntrb * sizeof (xhci_trb_t);
411
412 if (dma < pa || dma >= high ||
413 dma % sizeof (xhci_trb_t) != 0)
414 return (B_FALSE);
415
416 dma -= pa;
417 dma /= sizeof (xhci_trb_t);
|