1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2016 Joyent, Inc.
14 */
15
16 /*
17 * xHCI Endpoint Initialization and Management
18 *
19 * Please see the big theory statement in xhci.c for more information.
20 */
21
22 #include <sys/usb/hcd/xhci/xhci.h>
23 #include <sys/sdt.h>
24
25 boolean_t
26 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
27 {
28 usba_pipe_handle_data_t *ph;
29
30 ASSERT(xep != NULL);
31 ph = xep->xep_pipe;
32 ASSERT(ph != NULL);
33
858 * sure that if the ring moves on, it won't see the correct cycle bit.
859 */
860 int
861 xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
862 xhci_transfer_t *xt, boolean_t ring)
863 {
864 int i;
865 xhci_ring_t *rp = &xep->xep_ring;
866
867 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
868 ASSERT(xt->xt_ntrbs > 0);
869 ASSERT(xt->xt_trbs != NULL);
870
871 if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
872 return (USB_FAILURE);
873
874 if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
875 return (USB_NO_RESOURCES);
876
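/*
 * Fill the TRBs in reverse order, leaving the first TRB for last, so that
 * the hardware does not see a correct cycle bit on the first TRB until the
 * rest of the transfer has been written to the ring.
 */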
877 for (i = xt->xt_ntrbs - 1; i > 0; i--) {
878 xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], B_TRUE);
879 }
880 xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], B_FALSE);
881
882 XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
883 xhci_ring_trb_produce(rp, xt->xt_ntrbs);
884 list_insert_tail(&xep->xep_transfers, xt);
885
886 XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
887 if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
888 xhci_error(xhcip, "failed to write out TRB for device on slot "
889 "%d, port %d, and endpoint %u: encountered fatal FM error "
890 "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
891 xep->xep_num);
892 xhci_fm_runtime_reset(xhcip);
893 return (USB_HC_HARDWARE_ERROR);
894 }
895
896 if (xep->xep_timeout == 0 &&
897 !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
898 xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
899 drv_usectohz(XHCI_TICK_TIMEOUT_US));
900 }
901
902 xt->xt_sched_time = gethrtime();
903
904 if (ring == B_FALSE)
905 return (USB_SUCCESS);
906
907 return (xhci_endpoint_ring(xhcip, xd, xep));
908 }
909
910 static xhci_transfer_t *
911 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
912 xhci_trb_t *trb, int *offp)
913 {
914 xhci_transfer_t *xt;
915
916 ASSERT(xhcip != NULL);
917 ASSERT(offp != NULL);
918 ASSERT(xep != NULL);
919 ASSERT(trb != NULL);
920 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
921
922 if ((xt = list_head(&xep->xep_transfers)) == NULL)
923 return (NULL);
924
925 *offp = xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
926 xt->xt_ntrbs);
927 if (*offp == -1)
928 return (NULL);
929 return (xt);
930 }
931
932 static void
933 xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
934 xhci_endpoint_t *xep, xhci_transfer_t *xt)
935 {
936 int ret;
937 xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
938 xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
939
940 ASSERT3U(xpp->xpp_tsize, >, 0);
941
942 xt->xt_short = 0;
943 xt->xt_cr = USB_CR_OK;
944
945 mutex_enter(&xhcip->xhci_lock);
946
947 /*
948 * If we don't have an active poll, then we shouldn't bother trying to
949 * reschedule it. This means that we're trying to stop or we ran out of
978 xd->xd_slot, ret);
979 }
980 mutex_exit(&xhcip->xhci_lock);
981 }
982
983 /*
984 * We're dealing with a message on a control endpoint. This may be a default
985 * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
986 * a setup stage, data stage (which may have one or more other TRBs) and then a
987 * final status stage.
988 *
989 * We generally set ourselves up such that we get interrupted and notified only
990 * on the status stage and for short transfers in the data stage. If we
991 * encounter a short transfer in the data stage, then we need to go through and
992 * check whether or not the short transfer is allowed. If it is, then there's
993 * nothing to do. We'll update everything and call back the framework once we
994 * get the status stage.
995 */
996 static boolean_t
997 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
998 xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
999 {
1000 int code;
1001 usb_ctrl_req_t *ucrp;
1002 xhci_transfer_t *rem;
1003
1004 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1005
1006 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1007 ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1008
1009 /*
1010 * Now that we know what this TRB is for, we need to determine whether it
1011 * was for a data/normal stage or for the status stage. We cheat by looking
1012 * at the last entry. If it's a data stage, then we must have gotten a
1013 * short write. In that case, we should go through and check to make sure
1014 * it's allowed. If not, we need to fail the transfer, try to stop the
1015 * ring, and make callbacks. We'll clean up the xhci transfer at this time.
1016 */
1017 if (off != xt->xt_ntrbs - 1) {
1018 uint_t remain;
1019 usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1020
1021 /*
1022 * This is a data stage TRB. The only reason we should have
1023 * gotten something for this is because it was short. Make sure
1024 * it's okay before we continue.
1025 */
1026 VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1027 if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1028 xt->xt_cr = USB_CR_DATA_UNDERRUN;
1029 mutex_exit(&xhcip->xhci_lock);
1030 return (B_TRUE);
1031 }
1032
1033 /*
1034 * The value in the resulting trb is how much data remained to
1035 * be transferred. Normalize that against the original buffer
1133 {
1134 xhci_device_t *xd;
1135
1136 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1137
1138 for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1139 xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1140 if (xd->xd_slot == slot)
1141 return (xd);
1142 }
1143
1144 return (NULL);
1145 }
1146
1147 /*
1148 * Handle things which consist solely of normal transfers, in other words, bulk
1149 * and interrupt transfers.
1150 */
1151 static boolean_t
1152 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1153 xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
1154 {
1155 int code;
1156 usb_cr_t cr;
1157 xhci_transfer_t *rem;
1158 int attrs;
1159 mblk_t *mp;
1160 boolean_t periodic = B_FALSE;
1161 usb_opaque_t urp;
1162
1163 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1164 ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1165 xep->xep_type == USB_EP_ATTR_INTR);
1166
1167 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1168
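/*
 * On a short transfer the TRB status holds the number of bytes that were
 * not transferred, so record how much data actually moved.
 */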
1169 if (code == XHCI_CODE_SHORT_XFER) {
1170 int residue;
1171 residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1172 xt->xt_short = xt->xt_buffer.xdb_len - residue;
1173 }
1174
1175 /*
1176 * If we have an interrupt from something that's not the last entry,
1177 * that must mean we had a short transfer, so there's nothing more for
1178 * us to do at the moment. We won't call back until everything's
1179 * finished for the general transfer.
1180 */
1181 if (off < xt->xt_ntrbs - 1) {
1182 mutex_exit(&xhcip->xhci_lock);
1183 return (B_TRUE);
1184 }
1185
1186 urp = xt->xt_usba_req;
1187 if (xep->xep_type == USB_EP_ATTR_BULK) {
1188 usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1189 attrs = ubrp->bulk_attributes;
1190 mp = ubrp->bulk_data;
1191 } else {
1192 usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1193
1221 }
1222
1223 if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1224 DDI_FM_OK) {
1225 xhci_error(xhcip, "failed to process normal transfer "
1226 "callback for endpoint %u of device on slot %d and "
1227 "port %d: encountered fatal FM error synchronizing "
1228 "DMA memory, resetting device", xep->xep_num,
1229 xd->xd_slot, xd->xd_port);
1230 xhci_fm_runtime_reset(xhcip);
1231 mutex_exit(&xhcip->xhci_lock);
1232 return (B_FALSE);
1233 }
1234
1235 xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1236 mp->b_wptr += len;
1237 }
1238 cr = USB_CR_OK;
1239
1240 out:
1241 VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, LE_64(trb->trb_addr)));
1242 rem = list_remove_head(&xep->xep_transfers);
1243 VERIFY3P(rem, ==, xt);
1244 mutex_exit(&xhcip->xhci_lock);
1245
1246 usba_hcdi_cb(xep->xep_pipe, urp, cr);
1247 if (periodic == B_TRUE) {
1248 xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1249 } else {
1250 xhci_transfer_free(xhcip, xt);
1251 }
1252
1253 return (B_TRUE);
1254 }
1255
1256 static boolean_t
1257 xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
1258 xhci_endpoint_t *xep, xhci_transfer_t *xt, int off, xhci_trb_t *trb)
1259 {
1260 int code;
1261 usb_cr_t cr;
1262 xhci_transfer_t *rem;
1263 usb_isoc_pkt_descr_t *desc;
1264 usb_isoc_req_t *usrp;
1265
1266 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1267 ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);
1268
1269 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1270
1271 /*
1272 * The descriptors that we copy the data from are set up to assume that
1273 * everything was OK and we transferred all the requested data.
1274 */
1275 desc = &xt->xt_isoc[off];
1276 if (code == XHCI_CODE_SHORT_XFER) {
1277 int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1278 desc->isoc_pkt_actual_length -= residue;
1328
1329 if (cr == USB_CR_OK) {
1330 bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
1331 sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
1332 }
1333
1334 usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
1335 if (xt->xt_data_tohost == B_TRUE) {
1336 xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1337 } else {
1338 xhci_transfer_free(xhcip, xt);
1339 }
1340
1341 return (B_TRUE);
1342 }
1343
1344 boolean_t
1345 xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
1346 {
1347 boolean_t ret;
1348 int slot, endpoint, code, off;
1349 xhci_device_t *xd;
1350 xhci_endpoint_t *xep;
1351 xhci_transfer_t *xt;
1352 boolean_t transfer_done;
1353
1354 endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
1355 slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
1356 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1357
1358 mutex_enter(&xhcip->xhci_lock);
1359 xd = xhci_device_lookup_by_slot(xhcip, slot);
1360 if (xd == NULL) {
1361 xhci_error(xhcip, "received transfer trb with code %d for "
1362 "unknown slot %d and endpoint %d: resetting device", code,
1363 slot, endpoint);
1364 mutex_exit(&xhcip->xhci_lock);
1365 xhci_fm_runtime_reset(xhcip);
1366 return (B_FALSE);
1367 }
1368
1369 /*
1370 * Endpoint IDs are indexed based on their Device Context Index, which
1371 * means that we need to subtract one to get the actual ID that we use.
1372 */
1373 xep = xd->xd_endpoints[endpoint - 1];
1374 if (xep == NULL) {
1375 xhci_error(xhcip, "received transfer trb with code %d, slot "
1376 "%d, and unknown endpoint %d: resetting device", code,
1377 slot, endpoint);
1378 mutex_exit(&xhcip->xhci_lock);
1379 xhci_fm_runtime_reset(xhcip);
1380 return (B_FALSE);
1381 }
1382
1383 /*
1384 * This TRB should be part of a transfer. If it's not, then we ignore
1385 * it. We also check whether or not it's for the first transfer. Because
1386 * the rings are serviced in order, it should be.
1387 */
1388 if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
1389 NULL) {
1390 mutex_exit(&xhcip->xhci_lock);
1391 return (B_TRUE);
1392 }
1393
1394 transfer_done = B_FALSE;
1395
1396 switch (code) {
1397 case XHCI_CODE_SUCCESS:
1398 case XHCI_CODE_SHORT_XFER:
1399 /* Handled by endpoint logic */
1400 break;
1401 case XHCI_CODE_XFER_STOPPED:
1402 case XHCI_CODE_XFER_STOPINV:
1403 case XHCI_CODE_XFER_STOPSHORT:
1404 /*
1405 * This causes us to transition the endpoint to a stopped state.
1406 * Each of these indicates a different possible state that we
1407 * have to deal with. Effectively we're going to drop it and
1408 * leave it up to the consumers to figure out what to do. For
1409 * the moment, that's generally okay because stops are only used
1410 * in cases where we're cleaning up outstanding reqs, etc.
1411 */
1412 mutex_exit(&xhcip->xhci_lock);
1413 return (B_TRUE);
1414 case XHCI_CODE_STALL:
1415 /*
1416 * This causes us to transition to the halted state;
1417 * however, downstream clients are able to handle this just
1418 * fine.
1419 */
1420 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1421 xt->xt_cr = USB_CR_STALL;
1422 transfer_done = B_TRUE;
1423 break;
1424 case XHCI_CODE_BABBLE:
1425 transfer_done = B_TRUE;
1426 xt->xt_cr = USB_CR_DATA_OVERRUN;
1427 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1428 break;
1429 case XHCI_CODE_TXERR:
1430 case XHCI_CODE_SPLITERR:
1431 transfer_done = B_TRUE;
1432 xt->xt_cr = USB_CR_DEV_NOT_RESP;
1433 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1434 break;
1435 default:
1436 /*
1437 * Treat these as general unspecified errors that don't cause a
1438 * stop of the ring. Even if it does, a subsequent timeout
1439 * should occur which causes us to end up dropping a pipe reset
1440 * or at least issuing a reset of the device as part of
1441 * quiescing.
1442 */
1443 transfer_done = B_TRUE;
1444 break;
1445 }
1446
1447 if (transfer_done == B_TRUE) {
1448 xhci_transfer_t *alt;
1449
1450 alt = list_remove_head(&xep->xep_transfers);
1451 VERIFY3P(alt, ==, xt);
1452 mutex_exit(&xhcip->xhci_lock);
1453 if (xt->xt_usba_req == NULL) {
1454 usb_opaque_t urp;
1455
1456 urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
1457 usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
1458 } else {
1459 usba_hcdi_cb(xep->xep_pipe,
1460 (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
1461 xhci_transfer_free(xhcip, xt);
1462 }
1463 return (B_TRUE);
1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright (c) 2018, Joyent, Inc.
14 */
15
16 /*
17 * xHCI Endpoint Initialization and Management
18 *
19 * Please see the big theory statement in xhci.c for more information.
20 */
21
22 #include <sys/usb/hcd/xhci/xhci.h>
23 #include <sys/sdt.h>
24
25 boolean_t
26 xhci_endpoint_is_periodic_in(xhci_endpoint_t *xep)
27 {
28 usba_pipe_handle_data_t *ph;
29
30 ASSERT(xep != NULL);
31 ph = xep->xep_pipe;
32 ASSERT(ph != NULL);
33
858 * sure that if the ring moves on, it won't see the correct cycle bit.
859 */
860 int
861 xhci_endpoint_schedule(xhci_t *xhcip, xhci_device_t *xd, xhci_endpoint_t *xep,
862 xhci_transfer_t *xt, boolean_t ring)
863 {
864 int i;
865 xhci_ring_t *rp = &xep->xep_ring;
866
867 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
868 ASSERT(xt->xt_ntrbs > 0);
869 ASSERT(xt->xt_trbs != NULL);
870
871 if ((xep->xep_state & XHCI_ENDPOINT_DONT_SCHEDULE) != 0)
872 return (USB_FAILURE);
873
874 if (xhci_ring_trb_space(rp, xt->xt_ntrbs) == B_FALSE)
875 return (USB_NO_RESOURCES);
876
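/*
 * Fill the TRBs in reverse order, leaving the first TRB for last, so that
 * the hardware does not see a correct cycle bit on the first TRB until the
 * rest of the transfer has been written to the ring. The fill also records
 * each TRB's physical address so that completion events can later be
 * matched back to this transfer.
 */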
877 for (i = xt->xt_ntrbs - 1; i > 0; i--) {
878 xhci_ring_trb_fill(rp, i, &xt->xt_trbs[i], &xt->xt_trbs_pa[i],
879 B_TRUE);
880 }
881 xhci_ring_trb_fill(rp, 0U, &xt->xt_trbs[0], &xt->xt_trbs_pa[0],
882 B_FALSE);
883
884 XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
885 xhci_ring_trb_produce(rp, xt->xt_ntrbs);
886 list_insert_tail(&xep->xep_transfers, xt);
887
888 XHCI_DMA_SYNC(rp->xr_dma, DDI_DMA_SYNC_FORDEV);
889 if (xhci_check_dma_handle(xhcip, &rp->xr_dma) != DDI_FM_OK) {
890 xhci_error(xhcip, "failed to write out TRB for device on slot "
891 "%d, port %d, and endpoint %u: encountered fatal FM error "
892 "synchronizing ring DMA memory", xd->xd_slot, xd->xd_port,
893 xep->xep_num);
894 xhci_fm_runtime_reset(xhcip);
895 return (USB_HC_HARDWARE_ERROR);
896 }
897
898 if (xep->xep_timeout == 0 &&
899 !(xep->xep_state & XHCI_ENDPOINT_PERIODIC)) {
900 xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
901 drv_usectohz(XHCI_TICK_TIMEOUT_US));
902 }
903
904 xt->xt_sched_time = gethrtime();
905
906 if (ring == B_FALSE)
907 return (USB_SUCCESS);
908
909 return (xhci_endpoint_ring(xhcip, xd, xep));
910 }
911
912 static xhci_transfer_t *
913 xhci_endpoint_determine_transfer(xhci_t *xhcip, xhci_endpoint_t *xep,
914 xhci_trb_t *trb, uint_t *offp)
915 {
916 uint_t i;
917 uint64_t addr;
918 xhci_transfer_t *xt;
919
920 ASSERT(xhcip != NULL);
921 ASSERT(offp != NULL);
922 ASSERT(xep != NULL);
923 ASSERT(trb != NULL);
924 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
925
926 if ((xt = list_head(&xep->xep_transfers)) == NULL)
927 return (NULL);
928
929 addr = LE_64(trb->trb_addr);
930
931 /*
932 * Check if this is the simple case of an event data TRB. If it is, then
933 * all we need to do is check that its data pointer matches the address
934 * of the transfer.
935 */
936 if (XHCI_TRB_GET_ED(LE_32(trb->trb_flags)) != 0) {
937 if (LE_64(trb->trb_addr) != (uintptr_t)xt)
938 return (NULL);
939
940 *offp = xt->xt_ntrbs - 1;
941 return (xt);
942 }
943
944 /*
945 * This represents an error that has occurred. We need to check two
946 * different things. The first is that the TRB PA maps to one of the
947 * TRBs in the transfer. The second is that it makes sense in the context
948 * of the ring and our notion of where the tail is.
949 */
950 for (i = 0; i < xt->xt_ntrbs; i++) {
951 if (xt->xt_trbs_pa[i] == addr)
952 break;
953 }
954
955 if (i == xt->xt_ntrbs)
956 return (NULL);
957
958 if (xhci_ring_trb_valid_range(&xep->xep_ring, LE_64(trb->trb_addr),
959 xt->xt_ntrbs) == -1)
960 return (NULL);
961
962 *offp = i;
963 return (xt);
964 }
965
966 static void
967 xhci_endpoint_reschedule_periodic(xhci_t *xhcip, xhci_device_t *xd,
968 xhci_endpoint_t *xep, xhci_transfer_t *xt)
969 {
970 int ret;
971 xhci_pipe_t *xp = (xhci_pipe_t *)xep->xep_pipe->p_hcd_private;
972 xhci_periodic_pipe_t *xpp = &xp->xp_periodic;
973
974 ASSERT3U(xpp->xpp_tsize, >, 0);
975
976 xt->xt_short = 0;
977 xt->xt_cr = USB_CR_OK;
978
979 mutex_enter(&xhcip->xhci_lock);
980
981 /*
982 * If we don't have an active poll, then we shouldn't bother trying to
983 * reschedule it. This means that we're trying to stop or we ran out of
1012 xd->xd_slot, ret);
1013 }
1014 mutex_exit(&xhcip->xhci_lock);
1015 }
1016
1017 /*
1018 * We're dealing with a message on a control endpoint. This may be a default
1019 * endpoint or otherwise. These usually come in groups of 3+ TRBs where you have
1020 * a setup stage, data stage (which may have one or more other TRBs) and then a
1021 * final status stage.
1022 *
1023 * We generally set ourselves up such that we get interrupted and notified only
1024 * on the status stage and for short transfers in the data stage. If we
1025 * encounter a short transfer in the data stage, then we need to go through and
1026 * check whether or not the short transfer is allowed. If it is, then there's
1027 * nothing to do. We'll update everything and call back the framework once we
1028 * get the status stage.
1029 */
1030 static boolean_t
1031 xhci_endpoint_control_callback(xhci_t *xhcip, xhci_device_t *xd,
1032 xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1033 {
1034 int code;
1035 usb_ctrl_req_t *ucrp;
1036 xhci_transfer_t *rem;
1037
1038 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1039
1040 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1041 ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1042
1043 /*
1044 * Now that we know what this TRB is for, we need to determine whether it
1045 * was for a data/normal stage or for the status stage. We cheat by
1046 * looking at the last entry. If it's a data stage, then we must have
1047 * gotten a short write. We record this fact and whether we should
1048 * consider the transfer fatal for the subsequent status stage.
1049 */
1050 if (off != xt->xt_ntrbs - 1) {
1051 uint_t remain;
1052 usb_ctrl_req_t *ucrp = (usb_ctrl_req_t *)xt->xt_usba_req;
1053
1054 /*
1055 * This is a data stage TRB. The only reason we should have
1056 * gotten something for this is because it was short. Make sure
1057 * it's okay before we continue.
1058 */
1059 VERIFY3S(code, ==, XHCI_CODE_SHORT_XFER);
1060 if (!(ucrp->ctrl_attributes & USB_ATTRS_SHORT_XFER_OK)) {
1061 xt->xt_cr = USB_CR_DATA_UNDERRUN;
1062 mutex_exit(&xhcip->xhci_lock);
1063 return (B_TRUE);
1064 }
1065
1066 /*
1067 * The value in the resulting trb is how much data remained to
1068 * be transferred. Normalize that against the original buffer
1166 {
1167 xhci_device_t *xd;
1168
1169 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1170
1171 for (xd = list_head(&xhcip->xhci_usba.xa_devices); xd != NULL;
1172 xd = list_next(&xhcip->xhci_usba.xa_devices, xd)) {
1173 if (xd->xd_slot == slot)
1174 return (xd);
1175 }
1176
1177 return (NULL);
1178 }
1179
1180 /*
1181 * Handle things which consist solely of normal transfers, in other words, bulk
1182 * and interrupt transfers.
1183 */
1184 static boolean_t
1185 xhci_endpoint_norm_callback(xhci_t *xhcip, xhci_device_t *xd,
1186 xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1187 {
1188 int code;
1189 usb_cr_t cr;
1190 xhci_transfer_t *rem;
1191 int attrs;
1192 mblk_t *mp;
1193 boolean_t periodic = B_FALSE;
1194 usb_opaque_t urp;
1195
1196 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1197 ASSERT(xep->xep_type == USB_EP_ATTR_BULK ||
1198 xep->xep_type == USB_EP_ATTR_INTR);
1199
1200 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1201
1202 if (code == XHCI_CODE_SHORT_XFER) {
1203 uint_t residue;
1204 residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1205
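/*
 * Bulk endpoints complete through an event data TRB, so the event's
 * length field already holds the number of bytes transferred. Other
 * endpoints report the number of bytes left over, so convert that into
 * the amount that actually moved.
 */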
1206 if (xep->xep_type == USB_EP_ATTR_BULK) {
1207 VERIFY3U(XHCI_TRB_GET_ED(LE_32(trb->trb_flags)), !=, 0);
1208 xt->xt_short = residue;
1209 } else {
1210 xt->xt_short = xt->xt_buffer.xdb_len - residue;
1211 }
1212 }
1213
1214 /*
1215 * If we have an interrupt from something that's not the last entry,
1216 * that must mean we had a short transfer, so there's nothing more for
1217 * us to do at the moment. We won't call back until everything's
1218 * finished for the general transfer.
1219 */
1220 if (off < xt->xt_ntrbs - 1) {
1221 mutex_exit(&xhcip->xhci_lock);
1222 return (B_TRUE);
1223 }
1224
1225 urp = xt->xt_usba_req;
1226 if (xep->xep_type == USB_EP_ATTR_BULK) {
1227 usb_bulk_req_t *ubrp = (usb_bulk_req_t *)xt->xt_usba_req;
1228 attrs = ubrp->bulk_attributes;
1229 mp = ubrp->bulk_data;
1230 } else {
1231 usb_intr_req_t *uirp = (usb_intr_req_t *)xt->xt_usba_req;
1232
1260 }
1261
1262 if (xhci_transfer_sync(xhcip, xt, DDI_DMA_SYNC_FORCPU) !=
1263 DDI_FM_OK) {
1264 xhci_error(xhcip, "failed to process normal transfer "
1265 "callback for endpoint %u of device on slot %d and "
1266 "port %d: encountered fatal FM error synchronizing "
1267 "DMA memory, resetting device", xep->xep_num,
1268 xd->xd_slot, xd->xd_port);
1269 xhci_fm_runtime_reset(xhcip);
1270 mutex_exit(&xhcip->xhci_lock);
1271 return (B_FALSE);
1272 }
1273
1274 xhci_transfer_copy(xt, mp->b_rptr, len, B_TRUE);
1275 mp->b_wptr += len;
1276 }
1277 cr = USB_CR_OK;
1278
1279 out:
1280 /*
1281 * Don't use the address from the TRB here. When we're dealing with
1282 * event data, that will be entirely wrong.
1283 */
1284 VERIFY(xhci_ring_trb_consumed(&xep->xep_ring, xt->xt_trbs_pa[off]));
1285 rem = list_remove_head(&xep->xep_transfers);
1286 VERIFY3P(rem, ==, xt);
1287 mutex_exit(&xhcip->xhci_lock);
1288
1289 usba_hcdi_cb(xep->xep_pipe, urp, cr);
1290 if (periodic == B_TRUE) {
1291 xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1292 } else {
1293 xhci_transfer_free(xhcip, xt);
1294 }
1295
1296 return (B_TRUE);
1297 }
1298
1299 static boolean_t
1300 xhci_endpoint_isoch_callback(xhci_t *xhcip, xhci_device_t *xd,
1301 xhci_endpoint_t *xep, xhci_transfer_t *xt, uint_t off, xhci_trb_t *trb)
1302 {
1303 int code;
1304 usb_cr_t cr;
1305 xhci_transfer_t *rem;
1306 usb_isoc_pkt_descr_t *desc;
1307 usb_isoc_req_t *usrp;
1308
1309 ASSERT(MUTEX_HELD(&xhcip->xhci_lock));
1310 ASSERT3S(xep->xep_type, ==, USB_EP_ATTR_ISOCH);
1311
1312 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1313
1314 /*
1315 * The descriptors that we copy the data from are set up to assume that
1316 * everything was OK and we transferred all the requested data.
1317 */
1318 desc = &xt->xt_isoc[off];
1319 if (code == XHCI_CODE_SHORT_XFER) {
1320 int residue = XHCI_TRB_REMAIN(LE_32(trb->trb_status));
1321 desc->isoc_pkt_actual_length -= residue;
1371
1372 if (cr == USB_CR_OK) {
1373 bcopy(xt->xt_isoc, usrp->isoc_pkt_descr,
1374 sizeof (usb_isoc_pkt_descr_t) * usrp->isoc_pkts_count);
1375 }
1376
1377 usba_hcdi_cb(xep->xep_pipe, (usb_opaque_t)usrp, cr);
1378 if (xt->xt_data_tohost == B_TRUE) {
1379 xhci_endpoint_reschedule_periodic(xhcip, xd, xep, xt);
1380 } else {
1381 xhci_transfer_free(xhcip, xt);
1382 }
1383
1384 return (B_TRUE);
1385 }
1386
1387 boolean_t
1388 xhci_endpoint_transfer_callback(xhci_t *xhcip, xhci_trb_t *trb)
1389 {
1390 boolean_t ret;
1391 int slot, endpoint, code;
1392 uint_t off;
1393 xhci_device_t *xd;
1394 xhci_endpoint_t *xep;
1395 xhci_transfer_t *xt;
1396 boolean_t transfer_done;
1397
1398 endpoint = XHCI_TRB_GET_EP(LE_32(trb->trb_flags));
1399 slot = XHCI_TRB_GET_SLOT(LE_32(trb->trb_flags));
1400 code = XHCI_TRB_GET_CODE(LE_32(trb->trb_status));
1401
1402 switch (code) {
1403 case XHCI_CODE_RING_UNDERRUN:
1404 case XHCI_CODE_RING_OVERRUN:
1405 /*
1406 * If we have an ISOC overrun or underrun then there will be no
1407 * valid data pointer in the TRB associated with it. Just drive
1408 * on.
1409 */
1410 return (B_TRUE);
1411 case XHCI_CODE_UNDEFINED:
1412 xhci_error(xhcip, "received transfer trb with undefined fatal "
1413 "error: resetting device");
1414 xhci_fm_runtime_reset(xhcip);
1415 return (B_FALSE);
1416 case XHCI_CODE_XFER_STOPPED:
1417 case XHCI_CODE_XFER_STOPINV:
1418 case XHCI_CODE_XFER_STOPSHORT:
1419 /*
1420 * This causes us to transition the endpoint to a stopped state.
1421 * Each of these indicates a different possible state that we
1422 * have to deal with. Effectively we're going to drop it and
1423 * leave it up to the consumers to figure out what to do. For
1424 * the moment, that's generally okay because stops are only used
1425 * in cases where we're cleaning up outstanding reqs, etc.
1426 *
1427 * We do this before we check for the corresponding transfer as
1428 * this will generally be generated by a command issued that's
1429 * stopping the ring.
1430 */
1431 return (B_TRUE);
1432 default:
1433 break;
1434 }
1435
1436 mutex_enter(&xhcip->xhci_lock);
1437 xd = xhci_device_lookup_by_slot(xhcip, slot);
1438 if (xd == NULL) {
1439 xhci_error(xhcip, "received transfer trb with code %d for "
1440 "unknown slot %d and endpoint %d: resetting device", code,
1441 slot, endpoint);
1442 mutex_exit(&xhcip->xhci_lock);
1443 xhci_fm_runtime_reset(xhcip);
1444 return (B_FALSE);
1445 }
1446
1447 /*
1448 * Endpoint IDs are indexed based on their Device Context Index, which
1449 * means that we need to subtract one to get the actual ID that we use.
1450 */
1451 xep = xd->xd_endpoints[endpoint - 1];
1452 if (xep == NULL) {
1453 xhci_error(xhcip, "received transfer trb with code %d, slot "
1454 "%d, and unknown endpoint %d: resetting device", code,
1455 slot, endpoint);
1456 mutex_exit(&xhcip->xhci_lock);
1457 xhci_fm_runtime_reset(xhcip);
1458 return (B_FALSE);
1459 }
1460
1461 /*
1462 * The TRB that we received may be an event data TRB for a bulk
1463 * endpoint, a normal or short completion for any other endpoint, or an
1464 * error. In all cases, we need to figure out what transfer this
1465 * corresponds to. If this is an error, then we need to make sure that
1466 * the generating ring has been cleaned up.
1467 *
1468 * TRBs should be delivered in order, based on the ring. If for some
1469 * reason we find something that doesn't add up here, then we need to
1470 * assume that something has gone horribly wrong in the system and issue
1471 * a runtime reset. We issue the runtime reset rather than just trying
1472 * to stop and flush the ring, because it's unclear if we could stop
1473 * the ring in time.
1474 */
1475 if ((xt = xhci_endpoint_determine_transfer(xhcip, xep, trb, &off)) ==
1476 NULL) {
1477 xhci_error(xhcip, "received transfer trb with code %d, slot "
1478 "%d, and endpoint %d, but does not match current transfer "
1479 "for endpoint: resetting device", code, slot, endpoint);
1480 mutex_exit(&xhcip->xhci_lock);
1481 xhci_fm_runtime_reset(xhcip);
1482 return (B_FALSE);
1483 }
1484
1485 transfer_done = B_FALSE;
1486
1487 switch (code) {
1488 case XHCI_CODE_SUCCESS:
1489 case XHCI_CODE_SHORT_XFER:
1490 /* Handled by endpoint logic */
1491 break;
1492 case XHCI_CODE_STALL:
1493 /*
1494 * This causes us to transition to the halted state;
1495 * however, downstream clients are able to handle this just
1496 * fine.
1497 */
1498 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1499 xt->xt_cr = USB_CR_STALL;
1500 transfer_done = B_TRUE;
1501 break;
1502 case XHCI_CODE_BABBLE:
1503 transfer_done = B_TRUE;
1504 xt->xt_cr = USB_CR_DATA_OVERRUN;
1505 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1506 break;
1507 case XHCI_CODE_TXERR:
1508 case XHCI_CODE_SPLITERR:
1509 transfer_done = B_TRUE;
1510 xt->xt_cr = USB_CR_DEV_NOT_RESP;
1511 xep->xep_state |= XHCI_ENDPOINT_HALTED;
1512 break;
1513 case XHCI_CODE_BW_OVERRUN:
1514 transfer_done = B_TRUE;
1515 xt->xt_cr = USB_CR_DATA_OVERRUN;
1516 break;
1517 case XHCI_CODE_DATA_BUF:
1518 transfer_done = B_TRUE;
1519 if (xt->xt_data_tohost)
1520 xt->xt_cr = USB_CR_DATA_OVERRUN;
1521 else
1522 xt->xt_cr = USB_CR_DATA_UNDERRUN;
1523 break;
1524 default:
1525 /*
1526 * Treat these as general unspecified errors that don't cause a
1527 * stop of the ring. Even if it does, a subsequent timeout
1528 * should occur which causes us to end up dropping a pipe reset
1529 * or at least issuing a reset of the device as part of
1530 * quiescing.
1531 */
1532 transfer_done = B_TRUE;
1533 xt->xt_cr = USB_CR_HC_HARDWARE_ERR;
1534 break;
1535 }
1536
1537 if (transfer_done == B_TRUE) {
1538 xhci_transfer_t *alt;
1539
1540 alt = list_remove_head(&xep->xep_transfers);
1541 VERIFY3P(alt, ==, xt);
1542 mutex_exit(&xhcip->xhci_lock);
1543 if (xt->xt_usba_req == NULL) {
1544 usb_opaque_t urp;
1545
1546 urp = xhci_endpoint_dup_periodic(xep, xt, &xt->xt_cr);
1547 usba_hcdi_cb(xep->xep_pipe, urp, xt->xt_cr);
1548 } else {
1549 usba_hcdi_cb(xep->xep_pipe,
1550 (usb_opaque_t)xt->xt_usba_req, xt->xt_cr);
1551 xhci_transfer_free(xhcip, xt);
1552 }
1553 return (B_TRUE);