MFV: illumos-gate@9a48f6c443e5968307491ba7cc134bbdd0328801
9806 ehci_take_control() can infinite loop due to PCI invalid reads
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Richard Lowe <richlowe@richlowe.net>
Author: Robert Mustacchi <rm@joyent.com>
NEX-16600 "No SOF interrupts have been received" on HPE ProLiant DL380 Gen10, leading to non-working USB EHCI controller
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
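
Background for the 9806 change reviewed below: ehci_take_control() walks the EHCI extended-capability (EECP) list in PCI config space, and on some systems broken platform firmware makes every config read return all-ones, the value illumos names PCI_EINVAL32. Masking an all-ones dword always yields a non-zero "next capability" offset, so the walk never terminates; the patch breaks out of the loop when that sentinel is seen. The sketch below is a minimal, self-contained illustration of that failure mode, not driver code: read_cfg32(), the mask macros, and main() are hypothetical stand-ins modeled on the EHCI EECP register layout (capability ID in bits 7:0, next-pointer in bits 15:8).

    /*
     * Illustrative sketch only: why an all-ones PCI config read makes the
     * extended-capability walk spin forever, and how checking for the
     * invalid-read sentinel breaks the loop.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define PCI_EINVAL32        0xffffffffu  /* all-ones "invalid read" value */
    #define EX_CAP_ID(c)        ((c) & 0xffu)        /* bits 7:0, capability ID */
    #define EX_CAP_NEXT(c)      (((c) >> 8) & 0xffu) /* bits 15:8, next offset */
    #define CAP_ID_BIOS_HANDOFF 0x01u

    /* Hypothetical stand-in for pci_config_get32() on a misbehaving platform. */
    static uint32_t
    read_cfg32(uint32_t offset)
    {
            (void) offset;
            return (PCI_EINVAL32);  /* every read comes back all-ones */
    }

    int
    main(void)
    {
            uint32_t off = 0x68;    /* example EECP offset from HCCPARAMS */
            int steps = 0;

            while (off != 0) {
                    uint32_t cap = read_cfg32(off);
                    steps++;

                    /* The fix: treat an invalid read as "no handoff capability". */
                    if (cap == PCI_EINVAL32) {
                            (void) printf("invalid read at 0x%x after %d step(s); "
                                "assume no BIOS handoff capability\n",
                                (unsigned int)off, steps);
                            return (0);
                    }
                    if (EX_CAP_ID(cap) == CAP_ID_BIOS_HANDOFF) {
                            (void) printf("found handoff capability at 0x%x\n",
                                (unsigned int)off);
                            break;
                    }
                    /*
                     * Without the sentinel check, all-ones yields a next offset
                     * of 0xff every time, so this loop would never exit.
                     */
                    off = EX_CAP_NEXT(cap);
            }
            return (0);
    }

With a read stub that returns a real capability dword, the same loop terminates normally, which is why the added guard only changes behavior on the misbehaving platforms.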
--- old/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
+++ new/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 + * Copyright (c) 2018, Joyent, Inc.
24 25 */
25 26
26 27 /*
28 + * Copyright 2018 Nexenta Systems, Inc.
29 + */
30 +
31 +/*
27 32 * EHCI Host Controller Driver (EHCI)
28 33 *
29 34 * The EHCI driver is a software driver which interfaces to the Universal
30 35 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
31 36 * the Host Controller is defined by the EHCI Host Controller Interface.
32 37 *
33 38 * This module contains the main EHCI driver code which handles all USB
34 39 * transfers, bandwidth allocations and other general functionalities.
35 40 */
36 41
37 42 #include <sys/usb/hcd/ehci/ehcid.h>
38 43 #include <sys/usb/hcd/ehci/ehci_isoch.h>
39 44 #include <sys/usb/hcd/ehci/ehci_xfer.h>
40 45
41 46 /*
42 47 * EHCI MSI tunable:
43 48 *
44 49 * By default MSI is enabled on all supported platforms except for the
45 50 * EHCI controller of ULI1575 South bridge.
46 51 */
47 52 boolean_t ehci_enable_msi = B_TRUE;
48 53
49 54 /* Pointer to the state structure */
50 55 extern void *ehci_statep;
51 56
52 57 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
53 58
54 59 extern uint_t ehci_vt62x2_workaround;
55 60 extern int force_ehci_off;
56 61
57 62 /* Adjustable variables for the size of the pools */
58 63 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
59 64 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
60 65
61 66 /*
  62   67  * Initialize the values which determine the order in which the 32ms
  63   68  * intr qh lists are executed by the host controller in the lattice tree.
64 69 */
65 70 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
66 71 {0x00, 0x10, 0x08, 0x18,
67 72 0x04, 0x14, 0x0c, 0x1c,
68 73 0x02, 0x12, 0x0a, 0x1a,
69 74 0x06, 0x16, 0x0e, 0x1e,
70 75 0x01, 0x11, 0x09, 0x19,
71 76 0x05, 0x15, 0x0d, 0x1d,
72 77 0x03, 0x13, 0x0b, 0x1b,
73 78 0x07, 0x17, 0x0f, 0x1f};
74 79
75 80 /*
76 81 * Initialize the values which are used to calculate start split mask
77 82 * for the low/full/high speed interrupt and isochronous endpoints.
78 83 */
79 84 static uint_t ehci_start_split_mask[15] = {
80 85 /*
81 86 * For high/full/low speed usb devices. For high speed
82 87 * device with polling interval greater than or equal
83 88 * to 8us (125us).
84 89 */
85 90 0x01, /* 00000001 */
86 91 0x02, /* 00000010 */
87 92 0x04, /* 00000100 */
88 93 0x08, /* 00001000 */
89 94 0x10, /* 00010000 */
90 95 0x20, /* 00100000 */
91 96 0x40, /* 01000000 */
92 97 0x80, /* 10000000 */
93 98
94 99 /* Only for high speed devices with polling interval 4us */
95 100 0x11, /* 00010001 */
96 101 0x22, /* 00100010 */
97 102 0x44, /* 01000100 */
98 103 0x88, /* 10001000 */
99 104
100 105 /* Only for high speed devices with polling interval 2us */
101 106 0x55, /* 01010101 */
102 107 0xaa, /* 10101010 */
103 108
104 109 /* Only for high speed devices with polling interval 1us */
105 110 0xff /* 11111111 */
106 111 };
107 112
108 113 /*
109 114 * Initialize the values which are used to calculate complete split mask
110 115 * for the low/full speed interrupt and isochronous endpoints.
111 116 */
112 117 static uint_t ehci_intr_complete_split_mask[7] = {
113 118 /* Only full/low speed devices */
114 119 0x1c, /* 00011100 */
115 120 0x38, /* 00111000 */
116 121 0x70, /* 01110000 */
117 122 0xe0, /* 11100000 */
118 123 0x00, /* Need FSTN feature */
119 124 0x00, /* Need FSTN feature */
120 125 0x00 /* Need FSTN feature */
121 126 };
122 127
123 128
124 129 /*
125 130 * EHCI Internal Function Prototypes
126 131 */
127 132
128 133 /* Host Controller Driver (HCD) initialization functions */
129 134 void ehci_set_dma_attributes(ehci_state_t *ehcip);
130 135 int ehci_allocate_pools(ehci_state_t *ehcip);
131 136 void ehci_decode_ddi_dma_addr_bind_handle_result(
132 137 ehci_state_t *ehcip,
133 138 int result);
134 139 int ehci_map_regs(ehci_state_t *ehcip);
135 140 int ehci_register_intrs_and_init_mutex(
136 141 ehci_state_t *ehcip);
137 142 static int ehci_add_intrs(ehci_state_t *ehcip,
138 143 int intr_type);
139 144 int ehci_init_ctlr(ehci_state_t *ehcip,
140 145 int init_type);
141 146 static int ehci_take_control(ehci_state_t *ehcip);
142 147 static int ehci_init_periodic_frame_lst_table(
143 148 ehci_state_t *ehcip);
144 149 static void ehci_build_interrupt_lattice(
145 150 ehci_state_t *ehcip);
146 151 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
147 152
148 153 /* Host Controller Driver (HCD) deinitialization functions */
149 154 int ehci_cleanup(ehci_state_t *ehcip);
150 155 static void ehci_rem_intrs(ehci_state_t *ehcip);
151 156 int ehci_cpr_suspend(ehci_state_t *ehcip);
152 157 int ehci_cpr_resume(ehci_state_t *ehcip);
153 158
154 159 /* Bandwidth Allocation functions */
155 160 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
156 161 usba_pipe_handle_data_t *ph,
157 162 uint_t *pnode,
158 163 uchar_t *smask,
159 164 uchar_t *cmask);
160 165 static int ehci_allocate_high_speed_bandwidth(
161 166 ehci_state_t *ehcip,
162 167 usba_pipe_handle_data_t *ph,
163 168 uint_t *hnode,
164 169 uchar_t *smask,
165 170 uchar_t *cmask);
166 171 static int ehci_allocate_classic_tt_bandwidth(
167 172 ehci_state_t *ehcip,
168 173 usba_pipe_handle_data_t *ph,
169 174 uint_t pnode);
170 175 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
171 176 usba_pipe_handle_data_t *ph,
172 177 uint_t pnode,
173 178 uchar_t smask,
174 179 uchar_t cmask);
175 180 static void ehci_deallocate_high_speed_bandwidth(
176 181 ehci_state_t *ehcip,
177 182 usba_pipe_handle_data_t *ph,
178 183 uint_t hnode,
179 184 uchar_t smask,
180 185 uchar_t cmask);
181 186 static void ehci_deallocate_classic_tt_bandwidth(
182 187 ehci_state_t *ehcip,
183 188 usba_pipe_handle_data_t *ph,
184 189 uint_t pnode);
185 190 static int ehci_compute_high_speed_bandwidth(
186 191 ehci_state_t *ehcip,
187 192 usb_ep_descr_t *endpoint,
188 193 usb_port_status_t port_status,
189 194 uint_t *sbandwidth,
190 195 uint_t *cbandwidth);
191 196 static int ehci_compute_classic_bandwidth(
192 197 usb_ep_descr_t *endpoint,
193 198 usb_port_status_t port_status,
194 199 uint_t *bandwidth);
195 200 int ehci_adjust_polling_interval(
196 201 ehci_state_t *ehcip,
197 202 usb_ep_descr_t *endpoint,
198 203 usb_port_status_t port_status);
199 204 static int ehci_adjust_high_speed_polling_interval(
200 205 ehci_state_t *ehcip,
201 206 usb_ep_descr_t *endpoint);
202 207 static uint_t ehci_lattice_height(uint_t interval);
203 208 static uint_t ehci_lattice_parent(uint_t node);
204 209 static uint_t ehci_find_periodic_node(
205 210 uint_t leaf,
206 211 int interval);
207 212 static uint_t ehci_leftmost_leaf(uint_t node,
208 213 uint_t height);
209 214 static uint_t ehci_pow_2(uint_t x);
210 215 static uint_t ehci_log_2(uint_t x);
211 216 static int ehci_find_bestfit_hs_mask(
212 217 ehci_state_t *ehcip,
213 218 uchar_t *smask,
214 219 uint_t *pnode,
215 220 usb_ep_descr_t *endpoint,
216 221 uint_t bandwidth,
217 222 int interval);
218 223 static int ehci_find_bestfit_ls_intr_mask(
219 224 ehci_state_t *ehcip,
220 225 uchar_t *smask,
221 226 uchar_t *cmask,
222 227 uint_t *pnode,
223 228 uint_t sbandwidth,
224 229 uint_t cbandwidth,
225 230 int interval);
226 231 static int ehci_find_bestfit_sitd_in_mask(
227 232 ehci_state_t *ehcip,
228 233 uchar_t *smask,
229 234 uchar_t *cmask,
230 235 uint_t *pnode,
231 236 uint_t sbandwidth,
232 237 uint_t cbandwidth,
233 238 int interval);
234 239 static int ehci_find_bestfit_sitd_out_mask(
235 240 ehci_state_t *ehcip,
236 241 uchar_t *smask,
237 242 uint_t *pnode,
238 243 uint_t sbandwidth,
239 244 int interval);
240 245 static uint_t ehci_calculate_bw_availability_mask(
241 246 ehci_state_t *ehcip,
242 247 uint_t bandwidth,
243 248 int leaf,
244 249 int leaf_count,
245 250 uchar_t *bw_mask);
246 251 static void ehci_update_bw_availability(
247 252 ehci_state_t *ehcip,
248 253 int bandwidth,
249 254 int leftmost_leaf,
250 255 int leaf_count,
251 256 uchar_t mask);
252 257
253 258 /* Miscellaneous functions */
254 259 ehci_state_t *ehci_obtain_state(
255 260 dev_info_t *dip);
256 261 int ehci_state_is_operational(
257 262 ehci_state_t *ehcip);
258 263 int ehci_do_soft_reset(
259 264 ehci_state_t *ehcip);
260 265 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
261 266 ehci_pipe_private_t *pp,
262 267 ehci_trans_wrapper_t *tw);
263 268 usb_frame_number_t ehci_get_current_frame_number(
264 269 ehci_state_t *ehcip);
265 270 static void ehci_cpr_cleanup(
266 271 ehci_state_t *ehcip);
267 272 int ehci_wait_for_sof(
268 273 ehci_state_t *ehcip);
269 274 void ehci_toggle_scheduler(
270 275 ehci_state_t *ehcip);
271 276 void ehci_print_caps(ehci_state_t *ehcip);
272 277 void ehci_print_regs(ehci_state_t *ehcip);
273 278 void ehci_print_qh(ehci_state_t *ehcip,
274 279 ehci_qh_t *qh);
275 280 void ehci_print_qtd(ehci_state_t *ehcip,
276 281 ehci_qtd_t *qtd);
277 282 void ehci_create_stats(ehci_state_t *ehcip);
278 283 void ehci_destroy_stats(ehci_state_t *ehcip);
279 284 void ehci_do_intrs_stats(ehci_state_t *ehcip,
280 285 int val);
281 286 void ehci_do_byte_stats(ehci_state_t *ehcip,
282 287 size_t len,
283 288 uint8_t attr,
284 289 uint8_t addr);
285 290
286 291 /*
287 292 * check if this ehci controller can support PM
288 293 */
289 294 int
290 295 ehci_hcdi_pm_support(dev_info_t *dip)
291 296 {
292 297 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
293 298 ddi_get_instance(dip));
294 299
295 300 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
296 301 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
297 302
298 303 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
299 304 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
300 305
301 306 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
302 307
303 308 return (USB_SUCCESS);
304 309 }
305 310
306 311 return (USB_FAILURE);
307 312 }
308 313
309 314 void
310 315 ehci_dma_attr_workaround(ehci_state_t *ehcip)
311 316 {
312 317 /*
313 318 * Some Nvidia chips can not handle qh dma address above 2G.
314 319 * The bit 31 of the dma address might be omitted and it will
 315  320  * cause a system crash or other unpredictable results. So force
 316  321  * the dma address to be allocated below 2G to make ehci work.
317 322 */
318 323 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
319 324 switch (ehcip->ehci_device_id) {
320 325 case PCI_DEVICE_NVIDIA_CK804:
321 326 case PCI_DEVICE_NVIDIA_MCP04:
322 327 USB_DPRINTF_L2(PRINT_MASK_ATTA,
323 328 ehcip->ehci_log_hdl,
324 329 "ehci_dma_attr_workaround: NVIDIA dma "
325 330 "workaround enabled, force dma address "
326 331 "to be allocated below 2G");
327 332 ehcip->ehci_dma_attr.dma_attr_addr_hi =
328 333 0x7fffffffull;
329 334 break;
330 335 default:
331 336 break;
332 337
333 338 }
334 339 }
335 340 }
336 341
337 342 /*
338 343 * Host Controller Driver (HCD) initialization functions
339 344 */
340 345
341 346 /*
342 347 * ehci_set_dma_attributes:
343 348 *
344 349 * Set the limits in the DMA attributes structure. Most of the values used
345 350 * in the DMA limit structures are the default values as specified by the
346 351 * Writing PCI device drivers document.
347 352 */
348 353 void
349 354 ehci_set_dma_attributes(ehci_state_t *ehcip)
350 355 {
351 356 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
352 357 "ehci_set_dma_attributes:");
353 358
354 359 /* Initialize the DMA attributes */
355 360 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
356 361 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
357 362 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
358 363
359 364 /* 32 bit addressing */
360 365 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
361 366
362 367 /* Byte alignment */
363 368 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
364 369
365 370 /*
366 371 * Since PCI specification is byte alignment, the
367 372 * burst size field should be set to 1 for PCI devices.
368 373 */
369 374 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
370 375
371 376 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
372 377 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
373 378 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
374 379 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
375 380 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
376 381 ehcip->ehci_dma_attr.dma_attr_flags = 0;
377 382 ehci_dma_attr_workaround(ehcip);
378 383 }
379 384
380 385
381 386 /*
382 387 * ehci_allocate_pools:
383 388 *
384 389 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
385 390 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
386 391 * to a 16 byte boundary.
387 392 */
388 393 int
389 394 ehci_allocate_pools(ehci_state_t *ehcip)
390 395 {
391 396 ddi_device_acc_attr_t dev_attr;
392 397 size_t real_length;
393 398 int result;
394 399 uint_t ccount;
395 400 int i;
396 401
397 402 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
398 403 "ehci_allocate_pools:");
399 404
400 405 /* The host controller will be little endian */
401 406 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
402 407 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
403 408 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
404 409
405 410 /* Byte alignment */
406 411 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
407 412
408 413 /* Allocate the QTD pool DMA handle */
409 414 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
410 415 DDI_DMA_SLEEP, 0,
411 416 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
412 417
413 418 goto failure;
414 419 }
415 420
416 421 /* Allocate the memory for the QTD pool */
417 422 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
418 423 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
419 424 &dev_attr,
420 425 DDI_DMA_CONSISTENT,
421 426 DDI_DMA_SLEEP,
422 427 0,
423 428 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
424 429 &real_length,
425 430 &ehcip->ehci_qtd_pool_mem_handle)) {
426 431
427 432 goto failure;
428 433 }
429 434
430 435 /* Map the QTD pool into the I/O address space */
431 436 result = ddi_dma_addr_bind_handle(
432 437 ehcip->ehci_qtd_pool_dma_handle,
433 438 NULL,
434 439 (caddr_t)ehcip->ehci_qtd_pool_addr,
435 440 real_length,
436 441 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
437 442 DDI_DMA_SLEEP,
438 443 NULL,
439 444 &ehcip->ehci_qtd_pool_cookie,
440 445 &ccount);
441 446
442 447 bzero((void *)ehcip->ehci_qtd_pool_addr,
443 448 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
444 449
445 450 /* Process the result */
446 451 if (result == DDI_DMA_MAPPED) {
447 452 /* The cookie count should be 1 */
448 453 if (ccount != 1) {
449 454 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
450 455 "ehci_allocate_pools: More than 1 cookie");
451 456
452 457 goto failure;
453 458 }
454 459 } else {
455 460 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
456 461 "ehci_allocate_pools: Result = %d", result);
457 462
458 463 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
459 464
460 465 goto failure;
461 466 }
462 467
463 468 /*
464 469 * DMA addresses for QTD pools are bound
465 470 */
466 471 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
467 472
468 473 /* Initialize the QTD pool */
469 474 for (i = 0; i < ehci_qtd_pool_size; i ++) {
470 475 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
471 476 qtd_state, EHCI_QTD_FREE);
472 477 }
473 478
474 479 /* Allocate the QTD pool DMA handle */
475 480 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
476 481 &ehcip->ehci_dma_attr,
477 482 DDI_DMA_SLEEP,
478 483 0,
479 484 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
480 485 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
481 486 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
482 487
483 488 goto failure;
484 489 }
485 490
486 491 /* Allocate the memory for the QH pool */
487 492 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
488 493 ehci_qh_pool_size * sizeof (ehci_qh_t),
489 494 &dev_attr,
490 495 DDI_DMA_CONSISTENT,
491 496 DDI_DMA_SLEEP,
492 497 0,
493 498 (caddr_t *)&ehcip->ehci_qh_pool_addr,
494 499 &real_length,
495 500 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
496 501 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
497 502 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
498 503
499 504 goto failure;
500 505 }
501 506
502 507 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
503 508 NULL,
504 509 (caddr_t)ehcip->ehci_qh_pool_addr,
505 510 real_length,
506 511 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
507 512 DDI_DMA_SLEEP,
508 513 NULL,
509 514 &ehcip->ehci_qh_pool_cookie,
510 515 &ccount);
511 516
512 517 bzero((void *)ehcip->ehci_qh_pool_addr,
513 518 ehci_qh_pool_size * sizeof (ehci_qh_t));
514 519
515 520 /* Process the result */
516 521 if (result == DDI_DMA_MAPPED) {
517 522 /* The cookie count should be 1 */
518 523 if (ccount != 1) {
519 524 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
520 525 "ehci_allocate_pools: More than 1 cookie");
521 526
522 527 goto failure;
523 528 }
524 529 } else {
525 530 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
526 531
527 532 goto failure;
528 533 }
529 534
530 535 /*
531 536 * DMA addresses for QH pools are bound
532 537 */
533 538 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
534 539
535 540 /* Initialize the QH pool */
536 541 for (i = 0; i < ehci_qh_pool_size; i ++) {
537 542 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
538 543 }
539 544
540 545 /* Byte alignment */
541 546 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
542 547
543 548 return (DDI_SUCCESS);
544 549
545 550 failure:
546 551 /* Byte alignment */
547 552 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
548 553
549 554 return (DDI_FAILURE);
550 555 }
551 556
552 557
553 558 /*
554 559 * ehci_decode_ddi_dma_addr_bind_handle_result:
555 560 *
556 561 * Process the return values of ddi_dma_addr_bind_handle()
557 562 */
558 563 void
559 564 ehci_decode_ddi_dma_addr_bind_handle_result(
560 565 ehci_state_t *ehcip,
561 566 int result)
562 567 {
563 568 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
564 569 "ehci_decode_ddi_dma_addr_bind_handle_result:");
565 570
566 571 switch (result) {
567 572 case DDI_DMA_PARTIAL_MAP:
568 573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
569 574 "Partial transfers not allowed");
570 575 break;
571 576 case DDI_DMA_INUSE:
572 577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
573 578 "Handle is in use");
574 579 break;
575 580 case DDI_DMA_NORESOURCES:
576 581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
577 582 "No resources");
578 583 break;
579 584 case DDI_DMA_NOMAPPING:
580 585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
581 586 "No mapping");
582 587 break;
583 588 case DDI_DMA_TOOBIG:
584 589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
585 590 "Object is too big");
586 591 break;
587 592 default:
588 593 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
589 594 "Unknown dma error");
590 595 }
591 596 }
592 597
593 598
594 599 /*
595 600 * ehci_map_regs:
596 601 *
597 602 * The Host Controller (HC) contains a set of on-chip operational registers
 598  603  * which should be mapped into a non-cacheable portion of the system
599 604 * addressable space.
600 605 */
601 606 int
602 607 ehci_map_regs(ehci_state_t *ehcip)
603 608 {
604 609 ddi_device_acc_attr_t attr;
605 610 uint16_t cmd_reg;
606 611 uint_t length;
607 612
608 613 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
609 614
610 615 /* Check to make sure we have memory access */
611 616 if (pci_config_setup(ehcip->ehci_dip,
612 617 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
613 618
614 619 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
615 620 "ehci_map_regs: Config error");
616 621
617 622 return (DDI_FAILURE);
618 623 }
619 624
620 625 /* Make sure Memory Access Enable is set */
621 626 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
622 627
623 628 if (!(cmd_reg & PCI_COMM_MAE)) {
624 629
625 630 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
626 631 "ehci_map_regs: Memory base address access disabled");
627 632
628 633 return (DDI_FAILURE);
629 634 }
630 635
631 636 /* The host controller will be little endian */
632 637 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
633 638 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
634 639 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
635 640
636 641 /* Map in EHCI Capability registers */
637 642 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
638 643 (caddr_t *)&ehcip->ehci_capsp, 0,
639 644 sizeof (ehci_caps_t), &attr,
640 645 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
641 646
642 647 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
643 648 "ehci_map_regs: Map setup error");
644 649
645 650 return (DDI_FAILURE);
646 651 }
647 652
648 653 length = ddi_get8(ehcip->ehci_caps_handle,
649 654 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
650 655
651 656 /* Free the original mapping */
652 657 ddi_regs_map_free(&ehcip->ehci_caps_handle);
653 658
654 659 /* Re-map in EHCI Capability and Operational registers */
655 660 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
656 661 (caddr_t *)&ehcip->ehci_capsp, 0,
657 662 length + sizeof (ehci_regs_t), &attr,
658 663 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
659 664
660 665 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
661 666 "ehci_map_regs: Map setup error");
662 667
663 668 return (DDI_FAILURE);
664 669 }
665 670
666 671 /* Get the pointer to EHCI Operational Register */
667 672 ehcip->ehci_regsp = (ehci_regs_t *)
668 673 ((uintptr_t)ehcip->ehci_capsp + length);
669 674
670 675 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
671 676 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
672 677 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
673 678
674 679 return (DDI_SUCCESS);
675 680 }
676 681
677 682 /*
678 683 * The following simulated polling is for debugging purposes only.
679 684 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
680 685 */
681 686 static int
682 687 ehci_is_polled(dev_info_t *dip)
683 688 {
684 689 int ret;
685 690 char *propval;
686 691
687 692 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
688 693 "usb-polling", &propval) != DDI_SUCCESS)
689 694
690 695 return (0);
691 696
692 697 ret = (strcmp(propval, "true") == 0);
693 698 ddi_prop_free(propval);
694 699
695 700 return (ret);
696 701 }
697 702
698 703 static void
699 704 ehci_poll_intr(void *arg)
700 705 {
701 706 /* poll every msec */
702 707 for (;;) {
703 708 (void) ehci_intr(arg, NULL);
704 709 delay(drv_usectohz(1000));
705 710 }
706 711 }
707 712
708 713 /*
709 714 * ehci_register_intrs_and_init_mutex:
710 715 *
711 716 * Register interrupts and initialize each mutex and condition variables
712 717 */
713 718 int
714 719 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
715 720 {
716 721 int intr_types;
717 722
718 723 #if defined(__x86)
719 724 uint8_t iline;
720 725 #endif
721 726
722 727 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
723 728 "ehci_register_intrs_and_init_mutex:");
724 729
725 730 /*
726 731 * There is a known MSI hardware bug with the EHCI controller
727 732 * of ULI1575 southbridge. Hence MSI is disabled for this chip.
728 733 */
729 734 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
730 735 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
731 736 ehcip->ehci_msi_enabled = B_FALSE;
732 737 } else {
733 738 /* Set the MSI enable flag from the global EHCI MSI tunable */
734 739 ehcip->ehci_msi_enabled = ehci_enable_msi;
735 740 }
736 741
737 742 /* launch polling thread instead of enabling pci interrupt */
738 743 if (ehci_is_polled(ehcip->ehci_dip)) {
739 744 extern pri_t maxclsyspri;
740 745
741 746 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
742 747 "ehci_register_intrs_and_init_mutex: "
743 748 "running in simulated polled mode");
744 749
745 750 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
746 751 TS_RUN, maxclsyspri);
747 752
748 - goto skip_intr;
753 + return (DDI_SUCCESS);
749 754 }
750 755
751 756 #if defined(__x86)
752 757 /*
753 758 * Make sure that the interrupt pin is connected to the
754 759 * interrupt controller on x86. Interrupt line 255 means
755 760 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
756 761 * If we would return failure when interrupt line equals 255, then
757 762 * high speed devices will be routed to companion host controllers.
758 763 * However, it is not necessary to return failure here, and
759 764 * o/uhci codes don't check the interrupt line either.
760 765 * But it's good to log a message here for debug purposes.
761 766 */
762 767 iline = pci_config_get8(ehcip->ehci_config_handle,
763 768 PCI_CONF_ILINE);
764 769
765 770 if (iline == 255) {
766 771 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
767 772 "ehci_register_intrs_and_init_mutex: "
768 773 "interrupt line value out of range (%d)",
769 774 iline);
770 775 }
771 776 #endif /* __x86 */
772 777
773 778 /* Get supported interrupt types */
774 779 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
775 780 &intr_types) != DDI_SUCCESS) {
776 781 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
777 782 "ehci_register_intrs_and_init_mutex: "
778 783 "ddi_intr_get_supported_types failed");
779 784
780 785 return (DDI_FAILURE);
781 786 }
782 787
783 788 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
784 789 "ehci_register_intrs_and_init_mutex: "
785 790 "supported interrupt types 0x%x", intr_types);
786 791
787 792 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
788 793 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
789 794 != DDI_SUCCESS) {
790 795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
791 796 "ehci_register_intrs_and_init_mutex: MSI "
792 797 "registration failed, trying FIXED interrupt \n");
793 798 } else {
794 799 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
795 800 "ehci_register_intrs_and_init_mutex: "
796 801 "Using MSI interrupt type\n");
797 802
798 803 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
799 804 ehcip->ehci_flags |= EHCI_INTR;
800 805 }
801 806 }
802 807
803 808 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
804 809 (intr_types & DDI_INTR_TYPE_FIXED)) {
805 810 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
806 811 != DDI_SUCCESS) {
807 812 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
808 813 "ehci_register_intrs_and_init_mutex: "
809 814 "FIXED interrupt registration failed\n");
810 815
811 816 return (DDI_FAILURE);
812 817 }
813 818
814 819 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
815 820 "ehci_register_intrs_and_init_mutex: "
816 821 "Using FIXED interrupt type\n");
817 822
818 823 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
819 824 ehcip->ehci_flags |= EHCI_INTR;
820 825 }
821 826
822 -skip_intr:
823 - /* Create prototype for advance on async schedule */
824 - cv_init(&ehcip->ehci_async_schedule_advance_cv,
825 - NULL, CV_DRIVER, NULL);
826 -
827 827 return (DDI_SUCCESS);
828 828 }
829 829
830 830
831 831 /*
832 832 * ehci_add_intrs:
833 833 *
834 834 * Register FIXED or MSI interrupts.
835 835 */
836 836 static int
837 -ehci_add_intrs(ehci_state_t *ehcip,
838 - int intr_type)
837 +ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
839 838 {
840 839 int actual, avail, intr_size, count = 0;
841 840 int i, flag, ret;
842 841
843 842 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 843 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845 844
846 845 /* Get number of interrupts */
847 846 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 847 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 848 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 849 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 850 "ret: %d, count: %d", ret, count);
852 851
853 852 return (DDI_FAILURE);
854 853 }
855 854
856 855 /* Get number of available interrupts */
857 856 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 857 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 858 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 859 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 860 "ret: %d, count: %d", ret, count);
862 861
863 862 return (DDI_FAILURE);
864 863 }
865 864
866 865 if (avail < count) {
867 866 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 867 "ehci_add_intrs: ehci_add_intrs: nintrs () "
869 868 "returned %d, navail returned %d\n", count, avail);
870 869 }
871 870
872 871 /* Allocate an array of interrupt handles */
873 872 intr_size = count * sizeof (ddi_intr_handle_t);
874 873 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875 874
876 875 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 876 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878 877
879 878 /* call ddi_intr_alloc() */
880 879 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 880 intr_type, 0, count, &actual, flag);
882 881
883 882 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 883 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 884 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886 885
887 886 kmem_free(ehcip->ehci_htable, intr_size);
888 887
889 888 return (DDI_FAILURE);
890 889 }
891 890
892 891 if (actual < count) {
893 892 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 893 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 894 count, actual);
896 895
897 896 for (i = 0; i < actual; i++)
898 897 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899 898
900 899 kmem_free(ehcip->ehci_htable, intr_size);
901 900
902 901 return (DDI_FAILURE);
903 902 }
904 903
905 904 ehcip->ehci_intr_cnt = actual;
906 905
907 906 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 907 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 908 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 909 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911 910
912 911 for (i = 0; i < actual; i++)
913 912 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914 913
915 914 kmem_free(ehcip->ehci_htable, intr_size);
916 915
917 916 return (DDI_FAILURE);
918 917 }
919 918
920 919 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 920 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 921 ehcip->ehci_intr_pri);
923 922
924 923 /* Test for high level mutex */
925 924 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 925 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 926 "ehci_add_intrs: Hi level interrupt not supported");
928 927
929 928 for (i = 0; i < actual; i++)
930 929 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931 930
932 931 kmem_free(ehcip->ehci_htable, intr_size);
933 932
934 933 return (DDI_FAILURE);
935 934 }
936 935
937 936 /* Initialize the mutex */
938 937 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 938 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940 939
941 940 /* Call ddi_intr_add_handler() */
942 941 for (i = 0; i < actual; i++) {
943 942 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 943 ehci_intr, (caddr_t)ehcip,
945 944 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 945 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 946 "ehci_add_intrs:ddi_intr_add_handler() "
948 947 "failed %d", ret);
949 948
950 949 for (i = 0; i < actual; i++)
951 950 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952 951
953 952 mutex_destroy(&ehcip->ehci_int_mutex);
954 953 kmem_free(ehcip->ehci_htable, intr_size);
955 954
956 955 return (DDI_FAILURE);
957 956 }
958 957 }
959 958
960 959 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 960 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 961 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 962 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964 963
965 964 for (i = 0; i < actual; i++) {
966 965 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 966 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 967 }
969 968
970 969 mutex_destroy(&ehcip->ehci_int_mutex);
971 970 kmem_free(ehcip->ehci_htable, intr_size);
972 971
973 972 return (DDI_FAILURE);
974 973 }
975 974
976 975 /* Enable all interrupts */
977 976 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 977 /* Call ddi_intr_block_enable() for MSI interrupts */
979 978 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 979 ehcip->ehci_intr_cnt);
981 980 } else {
982 981 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 982 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 983 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 984 }
986 985
987 986 return (DDI_SUCCESS);
988 987 }
989 988
990 989
991 990 /*
992 991 * ehci_init_hardware
993 992 *
994 993 * take control from BIOS, reset EHCI host controller, and check version, etc.
995 994 */
996 995 int
997 996 ehci_init_hardware(ehci_state_t *ehcip)
998 997 {
999 998 int revision;
1000 999 uint16_t cmd_reg;
1001 1000 int abort_on_BIOS_take_over_failure;
1002 1001
1003 1002 /* Take control from the BIOS */
1004 1003 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005 1004
1006 1005 /* read .conf file properties */
1007 1006 abort_on_BIOS_take_over_failure =
1008 1007 ddi_prop_get_int(DDI_DEV_T_ANY,
1009 1008 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 1009 "abort-on-BIOS-take-over-failure", 0);
1011 1010
1012 1011 if (abort_on_BIOS_take_over_failure) {
1013 1012
1014 1013 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 1014 "Unable to take control from BIOS.");
1016 1015
1017 1016 return (DDI_FAILURE);
1018 1017 }
1019 1018
1020 1019 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 1020 "Unable to take control from BIOS. Failure is ignored.");
1022 1021 }
1023 1022
1024 1023 /* set Memory Master Enable */
1025 1024 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 1025 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 1026 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028 1027
1029 1028 /* Reset the EHCI host controller */
1030 1029 Set_OpReg(ehci_command,
1031 1030 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032 1031
1033 1032 /* Wait 10ms for reset to complete */
1034 1033 drv_usecwait(EHCI_RESET_TIMEWAIT);
1035 1034
1036 1035 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037 1036
1038 1037 /* Verify the version number */
1039 1038 revision = Get_16Cap(ehci_version);
1040 1039
1041 1040 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 1041 "ehci_init_hardware: Revision 0x%x", revision);
1043 1042
1044 1043 /*
1045 1044 * EHCI driver supports EHCI host controllers compliant to
1046 1045 * 0.95 and higher revisions of EHCI specifications.
1047 1046 */
1048 1047 if (revision < EHCI_REVISION_0_95) {
1049 1048
1050 1049 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 1050 "Revision 0x%x is not supported", revision);
1052 1051
1053 1052 return (DDI_FAILURE);
1054 1053 }
1055 1054
1056 1055 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057 1056
1058 1057 /* Initialize the Frame list base address area */
1059 1058 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060 1059
1061 1060 return (DDI_FAILURE);
1062 1061 }
1063 1062
1064 1063 /*
1065 1064 * For performance reasons, do not insert anything into the
1066 1065 * asynchronous list or activate the asynch list schedule until
1067 1066 * there is a valid QH.
1068 1067 */
1069 1068 ehcip->ehci_head_of_async_sched_list = NULL;
1070 1069
1071 1070 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 1071 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 1072 /*
1074 1073 * The driver is unable to reliably stop the asynch
1075 1074 * list schedule on VIA VT6202 controllers, so we
1076 1075 * always keep a dummy QH on the list.
1077 1076 */
1078 1077 ehci_qh_t *dummy_async_qh =
1079 1078 ehci_alloc_qh(ehcip, NULL, NULL);
1080 1079
1081 1080 Set_QH(dummy_async_qh->qh_link_ptr,
1082 1081 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1083 1082 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1084 1083
1085 1084 /* Set this QH to be the "head" of the circular list */
1086 1085 Set_QH(dummy_async_qh->qh_ctrl,
1087 1086 Get_QH(dummy_async_qh->qh_ctrl) |
1088 1087 EHCI_QH_CTRL_RECLAIM_HEAD);
1089 1088
1090 1089 Set_QH(dummy_async_qh->qh_next_qtd,
1091 1090 EHCI_QH_NEXT_QTD_PTR_VALID);
1092 1091 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1093 1092 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1094 1093
1095 1094 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1096 1095 ehcip->ehci_open_async_count++;
1097 1096 ehcip->ehci_async_req_count++;
1098 1097 }
1099 1098 }
1100 1099
1101 1100 return (DDI_SUCCESS);
1102 1101 }
1103 1102
1104 1103
1105 1104 /*
1106 1105 * ehci_init_workaround
1107 1106 *
1108 1107 * some workarounds during initializing ehci
1109 1108 */
1110 1109 int
1111 1110 ehci_init_workaround(ehci_state_t *ehcip)
1112 1111 {
1113 1112 /*
1114 1113 * Acer Labs Inc. M5273 EHCI controller does not send
1115 1114 * interrupts unless the Root hub ports are routed to the EHCI
1116 1115 * host controller; so route the ports now, before we test for
1117 1116  * the presence of SOF interrupts.
1118 1117 */
1119 1118 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1120 1119 /* Route all Root hub ports to EHCI host controller */
1121 1120 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1122 1121 }
1123 1122
1124 1123 /*
1125 1124 * VIA chips have some issues and may not work reliably.
1126 1125 * Revisions >= 0x80 are part of a southbridge and appear
1127 1126 * to be reliable with the workaround.
1128 1127  * For revisions < 0x80, complain if we were bound using the
1129 1128  * class code, else proceed. This allows the user to
1130 1129  * bind ehci specifically to this chip and not see the
1131 1130  * warnings.
1132 1131 */
1133 1132 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1134 1133
1135 1134 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1136 1135
1137 1136 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1138 1137 "ehci_init_workaround: Applying VIA workarounds "
1139 1138 "for the 6212 chip.");
1140 1139
1141 1140 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1142 1141 "pciclass,0c0320") == 0) {
1143 1142
1144 1143 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1145 1144 "Due to recently discovered incompatibilities");
1146 1145 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1147 1146 "with this USB controller, USB2.x transfer");
1148 1147 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1149 1148 "support has been disabled. This device will");
1150 1149 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1151 1150 "continue to function as a USB1.x controller.");
1152 1151 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1153 1152 "If you are interested in enabling USB2.x");
1154 1153 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1155 1154 "support please, refer to the ehci(7D) man page.");
1156 1155 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1157 1156 "Please also refer to www.sun.com/io for");
1158 1157 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1159 1158 "Solaris Ready products and to");
1160 1159 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1161 1160 "www.sun.com/bigadmin/hcl for additional");
1162 1161 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1163 1162 "compatible USB products.");
1164 1163
1165 1164 return (DDI_FAILURE);
1166 1165
1167 1166 } else if (ehci_vt62x2_workaround) {
1168 1167
1169 1168 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1170 1169 "Applying VIA workarounds");
1171 1170 }
1172 1171 }
1173 1172
1174 1173 return (DDI_SUCCESS);
1175 1174 }
1176 1175
1177 -
1178 1176 /*
1179 - * ehci_init_check_status
1180 - *
1181 - * Check if EHCI host controller is running
1182 - */
1183 -int
1184 -ehci_init_check_status(ehci_state_t *ehcip)
1185 -{
1186 - clock_t sof_time_wait;
1187 -
1188 - /*
1189 - * Get the number of clock ticks to wait.
1190 - * This is based on the maximum time it takes for a frame list rollover
1191 - * and maximum time wait for SOFs to begin.
1192 - */
1193 - sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1194 - EHCI_SOF_TIMEWAIT);
1195 -
1196 - /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1197 - ehcip->ehci_flags |= EHCI_CV_INTR;
1198 -
1199 - /* We need to add a delay to allow the chip time to start running */
1200 - (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1201 - &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1202 -
1203 - /*
1204 - * Check EHCI host controller is running, otherwise return failure.
1205 - */
1206 - if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1207 - (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1208 -
1209 - USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1210 - "No SOF interrupts have been received, this USB EHCI host"
1211 - "controller is unusable");
1212 -
1213 - /*
1214 - * Route all Root hub ports to Classic host
1215 - * controller, in case this is an unusable ALI M5273
1216 - * EHCI controller.
1217 - */
1218 - if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1219 - Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1220 - }
1221 -
1222 - return (DDI_FAILURE);
1223 - }
1224 -
1225 - return (DDI_SUCCESS);
1226 -}
1227 -
1228 -
1229 -/*
1230 1177 * ehci_init_ctlr:
1231 1178 *
1232 1179 * Initialize the Host Controller (HC).
1233 1180 */
1234 1181 int
1235 -ehci_init_ctlr(ehci_state_t *ehcip,
1236 - int init_type)
1182 +ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1237 1183 {
1238 1184 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1239 1185
1240 1186 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1241 1187
1242 1188 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1243 1189
1244 1190 return (DDI_FAILURE);
1245 1191 }
1246 1192 }
1247 1193
1248 1194 /*
1249 1195 * Check for Asynchronous schedule park capability feature. If this
1250 1196 * feature is supported, then, program ehci command register with
1251 1197 * appropriate values..
1252 1198 */
1253 1199 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1254 1200
1255 1201 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1256 1202 "ehci_init_ctlr: Async park mode is supported");
1257 1203
1258 1204 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1259 1205 (EHCI_CMD_ASYNC_PARK_ENABLE |
1260 1206 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1261 1207 }
1262 1208
1263 1209 /*
1264 1210 * Check for programmable periodic frame list feature. If this
1265 1211 * feature is supported, then, program ehci command register with
1266 1212 * 1024 frame list value.
1267 1213 */
1268 1214 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1269 1215
1270 1216 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1271 1217 "ehci_init_ctlr: Variable programmable periodic "
1272 1218 "frame list is supported");
1273 1219
1274 1220 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1275 1221 EHCI_CMD_FRAME_1024_SIZE));
1276 1222 }
1277 1223
1278 1224 /*
1279 1225 * Currently EHCI driver doesn't support 64 bit addressing.
1280 1226 *
1281 1227 * If we are using 64 bit addressing capability, then, program
1282 1228 * ehci_ctrl_segment register with 4 Gigabyte segment where all
1283 1229 * of the interface data structures are allocated.
1284 1230 */
1285 1231 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1286 1232
1287 1233 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1288 1234 "ehci_init_ctlr: EHCI driver doesn't support "
1289 1235 "64 bit addressing");
1290 1236 }
1291 1237
1292 1238  /* 64 bit addressing is not supported */
1293 1239 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1294 1240
1295 1241 /* Turn on/off the schedulers */
1296 1242 ehci_toggle_scheduler(ehcip);
1297 1243
1298 1244 /* Set host controller soft state to operational */
1299 1245 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1300 1246
1301 1247 /*
1302 1248 * Set the Periodic Frame List Base Address register with the
1303 1249 * starting physical address of the Periodic Frame List.
1304 1250 */
1305 1251 Set_OpReg(ehci_periodic_list_base,
1306 1252 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1307 1253 EHCI_PERIODIC_LIST_BASE));
1308 1254
1309 1255 /*
1310 1256 * Set ehci_interrupt to enable all interrupts except Root
1311 1257 * Hub Status change interrupt.
1312 1258 */
1313 1259 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1314 1260 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1315 1261 EHCI_INTR_USB);
1316 1262
1317 1263 /*
1318 1264 * Set the desired interrupt threshold and turn on EHCI host controller.
1319 1265 */
1320 1266 Set_OpReg(ehci_command,
1321 1267 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1322 1268 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1323 1269
1324 1270 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1325 1271
1326 1272 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1327 -
1328 1273 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1329 1274
1330 1275 /* Set host controller soft state to error */
1331 1276 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1332 1277
1333 1278 return (DDI_FAILURE);
1334 1279 }
1335 -
1336 - if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1337 -
1338 - /* Set host controller soft state to error */
1339 - ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1340 -
1341 - return (DDI_FAILURE);
1342 - }
1343 -
1344 - USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1345 - "ehci_init_ctlr: SOF's have started");
1346 1280 }
1347 1281
1348 1282 /* Route all Root hub ports to EHCI host controller */
1349 1283 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1350 1284
1351 1285 return (DDI_SUCCESS);
1352 1286 }
1353 1287
1354 1288 /*
1355 1289 * ehci_take_control:
1356 1290 *
1357 1291  * Handshake to take EHCI control from BIOS if necessary. It's only valid for
1358 1292 * x86 machines, because sparc doesn't have a BIOS.
1359 1293 * On x86 machine, the take control process includes
1360 1294 * o get the base address of the extended capability list
1361 1295 * o find out the capability for handoff synchronization in the list.
1362 1296 * o check if BIOS has owned the host controller.
1363 1297 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1364 1298 * o wait for a constant time and check if BIOS has relinquished control.
1365 1299 */
1366 1300 /* ARGSUSED */
1367 1301 static int
1368 1302 ehci_take_control(ehci_state_t *ehcip)
1369 1303 {
1370 1304 #if defined(__x86)
1371 1305 uint32_t extended_cap;
1372 1306 uint32_t extended_cap_offset;
1373 1307 uint32_t extended_cap_id;
1374 1308 uint_t retry;
1375 1309
1376 1310 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1377 1311 "ehci_take_control:");
1378 1312
1379 1313 /*
1380 1314  * According to EHCI Spec 2.2.4, get EECP base address from HCCPARAMS
1381 1315 * register.
1382 1316 */
1383 1317 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1384 1318 EHCI_HCC_EECP_SHIFT;
1385 1319
1386 1320  * According to EHCI Spec 2.2.4, if the extended capability offset is
1387 1321 * According EHCI Spec 2.2.4, if the extended capability offset is
1388 1322 * less than 40h then its not valid. This means we don't need to
1389 1323 * worry about BIOS handoff.
1390 1324 */
1391 1325 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1392 1326
1393 1327 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1394 1328 "ehci_take_control: Hardware doesn't support legacy.");
1395 1329
1396 1330 goto success;
1397 1331 }
1398 1332
|
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
1399 1333 /*
1400 1334 * According EHCI Spec 2.1.7, A zero offset indicates the
1401 1335 * end of the extended capability list.
1402 1336 */
1403 1337 while (extended_cap_offset) {
1404 1338
1405 1339 /* Get the extended capability value. */
1406 1340 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1407 1341 extended_cap_offset);
1408 1342
1343 + /*
1344 + * It's possible that we'll receive an invalid PCI read here due
1344 + * to something going wrong with platform firmware. This has
1346 + * been observed in the wild depending on the version of ACPI in
1347 + * use. If this happens, we'll assume that the capability does
1348 + * not exist and that we do not need to take control from the
1349 + * BIOS.
1350 + */
1351 + if (extended_cap == PCI_EINVAL32) {
1352 + extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1353 + break;
1354 + }
1355 +
1409 1356 /* Get the capability ID */
1410 1357 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1411 1358 EHCI_EX_CAP_ID_SHIFT;
1412 1359
1413 1360 /* Check if the card support legacy */
1414 1361 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1415 1362 break;
1416 1363 }
1417 1364
1418 1365 /* Get the offset of the next capability */
1419 1366 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1420 1367 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1368 +
1421 1369 }
1422 1370
1423 1371 /*
1424 1372 * Unable to find legacy support in hardware's extended capability list.
1425 1373 * This means we don't need to worry about BIOS handoff.
1426 1374 */
1427 1375 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1428 1376
1429 1377 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1430 1378 "ehci_take_control: Hardware doesn't support legacy");
1431 1379
1432 1380 goto success;
1433 1381 }
1434 1382
1435 1383 /* Check if BIOS has owned it. */
1436 1384 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1437 1385
1438 1386 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1439 1387 "ehci_take_control: BIOS does not own EHCI");
1440 1388
1441 1389 goto success;
1442 1390 }
1443 1391
1444 1392 /*
1445 1393  * According to EHCI Spec 5.1, the OS driver initiates an ownership
1446 1394 * request by setting the OS Owned semaphore to a one. The OS
1447 1395 * waits for the BIOS Owned bit to go to a zero before attempting
1448 1396 * to use the EHCI controller. The time that OS must wait for BIOS
1449 1397 * to respond to the request for ownership is beyond the scope of
1450 1398 * this specification.
1451 1399 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1452 1400 * for BIOS to release the ownership.
1453 1401 */
1454 1402 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1455 1403 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1456 1404 extended_cap);
1457 1405
1458 1406 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1459 1407
1460 1408 /* wait a special interval */
1461 1409 #ifndef __lock_lint
1462 1410 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1463 1411 #endif
1464 1412 /* Check to see if the BIOS has released the ownership */
1465 1413 extended_cap = pci_config_get32(
1466 1414 ehcip->ehci_config_handle, extended_cap_offset);
1467 1415
1468 1416 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1469 1417
1470 1418 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1471 1419 ehcip->ehci_log_hdl,
1472 1420 "ehci_take_control: BIOS has released "
1473 1421 "the ownership. retry = %d", retry);
1474 1422
1475 1423 goto success;
1476 1424 }
1477 1425
1478 1426 }
1479 1427
1480 1428 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1481 1429 "ehci_take_control: take control from BIOS failed.");
1482 1430
1483 1431 return (USB_FAILURE);
1484 1432
1485 1433 success:
1486 1434
1487 1435 #endif /* __x86 */
1488 1436 return (USB_SUCCESS);
1489 1437 }
1490 1438
1491 1439
1492 1440 /*
1493 1441 * ehci_init_periodic_frame_list_table :
1494 1442 *
1495 1443 * Allocate the system memory and initialize Host Controller
1496 1444 * Periodic Frame List table area. The starting of the Periodic
1497 1445 * Frame List Table area must be 4096 byte aligned.
1498 1446 */
1499 1447 static int
1500 1448 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1501 1449 {
1502 1450 ddi_device_acc_attr_t dev_attr;
1503 1451 size_t real_length;
1504 1452 uint_t ccount;
1505 1453 int result;
1506 1454
1507 1455 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1508 1456
1509 1457 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1510 1458 "ehci_init_periodic_frame_lst_table:");
1511 1459
1512 1460 /* The host controller will be little endian */
1513 1461 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1514 1462 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1515 1463 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1516 1464
1517 1465 /* Force the required 4K restrictive alignment */
1518 1466 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1519 1467
1520 1468 /* Create space for the Periodic Frame List */
1521 1469 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1522 1470 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1523 1471
1524 1472 goto failure;
1525 1473 }
1526 1474
1527 1475 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1528 1476 sizeof (ehci_periodic_frame_list_t),
1529 1477 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1530 1478 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1531 1479 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1532 1480
1533 1481 goto failure;
1534 1482 }
1535 1483
1536 1484 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1537 1485 "ehci_init_periodic_frame_lst_table: "
1538 1486 "Real length %lu", real_length);
1539 1487
1540 1488 /* Map the whole Periodic Frame List into the I/O address space */
1541 1489 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1542 1490 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1543 1491 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1544 1492 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1545 1493
1546 1494 if (result == DDI_DMA_MAPPED) {
1547 1495 /* The cookie count should be 1 */
1548 1496 if (ccount != 1) {
1549 1497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1550 1498 "ehci_init_periodic_frame_lst_table: "
1551 1499 "More than 1 cookie");
1552 1500
1553 1501 goto failure;
1554 1502 }
1555 1503 } else {
1556 1504 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1557 1505
1558 1506 goto failure;
1559 1507 }
1560 1508
1561 1509 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1562 1510 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1563 1511 (void *)ehcip->ehci_periodic_frame_list_tablep,
1564 1512 ehcip->ehci_pflt_cookie.dmac_address);
1565 1513
1566 1514 /*
1567 1515 * DMA addresses for Periodic Frame List are bound.
1568 1516 */
1569 1517 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1570 1518
1571 1519 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1572 1520
1573 1521 /* Initialize the Periodic Frame List */
1574 1522 ehci_build_interrupt_lattice(ehcip);
1575 1523
1576 1524 /* Reset Byte Alignment to Default */
1577 1525 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1578 1526
1579 1527 return (DDI_SUCCESS);
1580 1528 failure:
1581 1529 /* Byte alignment */
1582 1530 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1583 1531
1584 1532 return (DDI_FAILURE);
1585 1533 }
1586 1534
1587 1535
1588 1536 /*
1589 1537 * ehci_build_interrupt_lattice:
1590 1538 *
1591 1539 * Construct the interrupt lattice tree using static Endpoint Descriptors
1592 1540 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
1593 1541 * lists and the Host Controller (HC) processes one interrupt QH list in
1594 1542 * every frame. The Host Controller traverses the periodic schedule by
1595 1543 * constructing an array offset reference from the Periodic List Base Address
1596 1544 * register and bits 12 to 3 of Frame Index register. It fetches the element
1597 1545 * and begins traversing the graph of linked schedule data structures.
1598 1546 */
1599 1547 static void
1600 1548 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1601 1549 {
1602 1550 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1603 1551 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1604 1552 ehci_periodic_frame_list_t *periodic_frame_list =
1605 1553 ehcip->ehci_periodic_frame_list_tablep;
1606 1554 ushort_t *temp, num_of_nodes;
1607 1555 uintptr_t addr;
1608 1556 int i, j, k;
1609 1557
1610 1558 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1611 1559 "ehci_build_interrupt_lattice:");
1612 1560
1613 1561 /*
1614 1562 * Reserve the first 63 Endpoint Descriptor (QH) structures
1615 1563 * in the pool as static endpoints; these are required for
1616 1564 * constructing the interrupt lattice tree.
1617 1565 */
1618 1566 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1619 1567 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1620 1568 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1621 1569 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1622 1570 Set_QH(list_array[i].qh_alt_next_qtd,
1623 1571 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1624 1572 }
1625 1573
1626 1574 /*
1627 1575 * Make sure that last Endpoint on the periodic frame list terminates
1628 1576 * periodic schedule.
1629 1577 */
1630 1578 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1631 1579
1632 1580 /* Build the interrupt lattice tree */
1633 1581 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1634 1582 /*
1635 1583 * The next pointer in the host controller endpoint
1636 1584 * descriptor must contain an iommu address. Calculate
1637 1585 * the offset into the cpu address and add this to the
1638 1586 * starting iommu address.
1639 1587 */
1640 1588 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1641 1589
1642 1590 Set_QH(list_array[2*i + 1].qh_link_ptr,
1643 1591 addr | EHCI_QH_LINK_REF_QH);
1644 1592 Set_QH(list_array[2*i + 2].qh_link_ptr,
1645 1593 addr | EHCI_QH_LINK_REF_QH);
1646 1594 }
1647 1595
1648 1596 /* Build the tree bottom */
1649 1597 temp = (unsigned short *)
1650 1598 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1651 1599
1652 1600 num_of_nodes = 1;
1653 1601
1654 1602 /*
1655 1603 * Initialize the values which are used for setting up head pointers
1656 1604 * for the 32ms scheduling lists which starts from the Periodic Frame
1657 1605 * List.
1658 1606 */
1659 1607 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1660 1608 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1661 1609 ehci_index[j++] = temp[k];
1662 1610 ehci_index[j] = temp[k] + ehci_pow_2(i);
1663 1611 }
1664 1612
1665 1613 num_of_nodes *= 2;
1666 1614 for (k = 0; k < num_of_nodes; k++)
1667 1615 temp[k] = ehci_index[k];
1668 1616 }
1669 1617
1670 1618 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1671 1619
1672 1620 /*
1673 1621 * Initialize the interrupt list in the Periodic Frame List Table
1674 1622 * so that it points to the bottom of the tree.
1675 1623 */
1676 1624 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1677 1625 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1678 1626 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1679 1627
1680 1628 ASSERT(addr);
1681 1629
1682 1630 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1683 1631 Set_PFLT(periodic_frame_list->
1684 1632 ehci_periodic_frame_list_table[ehci_index[j++]],
1685 1633 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1686 1634 }
1687 1635 }
1688 1636 }
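
As an editorial aside (not part of the change under review), the traversal described in the header comment above can be illustrated with a small sketch. The helper name is hypothetical, and it assumes the frame list size EHCI_NUM_PERIODIC_FRAME_LISTS is a power of two (1024 by default for EHCI):

static uint32_t
example_periodic_element_addr(uint32_t frindex, uint32_t list_base)
{
	/* FRINDEX bits 2:0 select the micro-frame; bits 12:3 index the list */
	uint_t entry = (frindex >> 3) & (EHCI_NUM_PERIODIC_FRAME_LISTS - 1);

	/* Each frame list entry is a 4-byte link pointer */
	return (list_base + (entry * sizeof (uint32_t)));
}

The controller fetches the link pointer at that address each frame and then walks the graph of linked schedule data structures built by ehci_build_interrupt_lattice().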
1689 1637
1690 1638
1691 1639 /*
1692 1640 * ehci_alloc_hcdi_ops:
1693 1641 *
1694 1642 * The HCDI interfaces or entry points are the software interfaces used by
1695 1643 * the Universal Serial Bus Driver (USBA) to access the services of the
1696 1644 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1697 1645 * about all available HCDI interfaces or entry points.
1698 1646 */
1699 1647 usba_hcdi_ops_t *
1700 1648 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1701 1649 {
1702 1650 usba_hcdi_ops_t *usba_hcdi_ops;
1703 1651
1704 1652 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1705 1653 "ehci_alloc_hcdi_ops:");
1706 1654
1707 1655 usba_hcdi_ops = usba_alloc_hcdi_ops();
1708 1656
1709 1657 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1710 1658
1711 1659 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1712 1660 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1713 1661 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1714 1662
1715 1663 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1716 1664 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1717 1665 ehci_hcdi_pipe_reset_data_toggle;
1718 1666
1719 1667 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1720 1668 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1721 1669 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1722 1670 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1723 1671
1724 1672 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1725 1673 ehci_hcdi_bulk_transfer_size;
1726 1674
1727 1675 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1728 1676 ehci_hcdi_pipe_stop_intr_polling;
1729 1677 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1730 1678 ehci_hcdi_pipe_stop_isoc_polling;
1731 1679
1732 1680 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1733 1681 ehci_hcdi_get_current_frame_number;
1734 1682 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1735 1683 ehci_hcdi_get_max_isoc_pkts;
1736 1684
1737 1685 usba_hcdi_ops->usba_hcdi_console_input_init =
1738 1686 ehci_hcdi_polled_input_init;
1739 1687 usba_hcdi_ops->usba_hcdi_console_input_enter =
1740 1688 ehci_hcdi_polled_input_enter;
1741 1689 usba_hcdi_ops->usba_hcdi_console_read =
1742 1690 ehci_hcdi_polled_read;
1743 1691 usba_hcdi_ops->usba_hcdi_console_input_exit =
1744 1692 ehci_hcdi_polled_input_exit;
1745 1693 usba_hcdi_ops->usba_hcdi_console_input_fini =
1746 1694 ehci_hcdi_polled_input_fini;
1747 1695
1748 1696 usba_hcdi_ops->usba_hcdi_console_output_init =
1749 1697 ehci_hcdi_polled_output_init;
1750 1698 usba_hcdi_ops->usba_hcdi_console_output_enter =
1751 1699 ehci_hcdi_polled_output_enter;
1752 1700 usba_hcdi_ops->usba_hcdi_console_write =
1753 1701 ehci_hcdi_polled_write;
1754 1702 usba_hcdi_ops->usba_hcdi_console_output_exit =
1755 1703 ehci_hcdi_polled_output_exit;
1756 1704 usba_hcdi_ops->usba_hcdi_console_output_fini =
1757 1705 ehci_hcdi_polled_output_fini;
1758 1706 return (usba_hcdi_ops);
1759 1707 }
1760 1708
1761 1709
1762 1710 /*
1763 1711 * Host Controller Driver (HCD) deinitialization functions
1764 1712 */
1765 1713
1766 1714 /*
1767 1715 * ehci_cleanup:
1768 1716 *
1769 1717 * Cleanup on attach failure or detach
1770 1718 */
1771 1719 int
1772 1720 ehci_cleanup(ehci_state_t *ehcip)
1773 1721 {
1774 1722 ehci_trans_wrapper_t *tw;
1775 1723 ehci_pipe_private_t *pp;
1776 1724 ehci_qtd_t *qtd;
1777 1725 int i, ctrl, rval;
1778 1726 int flags = ehcip->ehci_flags;
1779 1727
1780 1728 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1781 1729
1782 1730 if (flags & EHCI_RHREG) {
1783 1731 /* Unload the root hub driver */
1784 1732 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1785 1733
1786 1734 return (DDI_FAILURE);
1787 1735 }
1788 1736 }
1789 1737
1790 1738 if (flags & EHCI_USBAREG) {
1791 1739 /* Unregister this HCD instance with USBA */
1792 1740 usba_hcdi_unregister(ehcip->ehci_dip);
1793 1741 }
1794 1742
1795 1743 if (flags & EHCI_INTR) {
1796 1744
1797 1745 mutex_enter(&ehcip->ehci_int_mutex);
1798 1746
1799 1747 /* Disable all EHCI QH list processing */
1800 1748 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1801 1749 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1802 1750 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1803 1751
1804 1752 /* Disable all EHCI interrupts */
1805 1753 Set_OpReg(ehci_interrupt, 0);
1806 1754
1807 1755 /* wait for the next SOF */
1808 1756 (void) ehci_wait_for_sof(ehcip);
1809 1757
1810 1758 /* Route all Root hub ports to Classic host controller */
1811 1759 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1812 1760
1813 1761 /* Stop the EHCI host controller */
1814 1762 Set_OpReg(ehci_command,
1815 1763 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1816 1764
1817 1765 mutex_exit(&ehcip->ehci_int_mutex);
1818 1766
1819 1767 /* Wait for sometime */
1820 1768 delay(drv_usectohz(EHCI_TIMEWAIT));
1821 1769
1822 1770 ehci_rem_intrs(ehcip);
1823 1771 }
1824 1772
1825 1773 /* Unmap the EHCI registers */
1826 1774 if (ehcip->ehci_caps_handle) {
1827 1775 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1828 1776 }
1829 1777
1830 1778 if (ehcip->ehci_config_handle) {
1831 1779 pci_config_teardown(&ehcip->ehci_config_handle);
1832 1780 }
1833 1781
1834 1782 /* Free all the buffers */
1835 1783 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1836 1784 for (i = 0; i < ehci_qtd_pool_size; i ++) {
1837 1785 qtd = &ehcip->ehci_qtd_pool_addr[i];
1838 1786 ctrl = Get_QTD(ehcip->
1839 1787 ehci_qtd_pool_addr[i].qtd_state);
1840 1788
1841 1789 if ((ctrl != EHCI_QTD_FREE) &&
1842 1790 (ctrl != EHCI_QTD_DUMMY) &&
1843 1791 (qtd->qtd_trans_wrapper)) {
1844 1792
1845 1793 mutex_enter(&ehcip->ehci_int_mutex);
1846 1794
1847 1795 tw = (ehci_trans_wrapper_t *)
1848 1796 EHCI_LOOKUP_ID((uint32_t)
1849 1797 Get_QTD(qtd->qtd_trans_wrapper));
1850 1798
1851 1799 /* Obtain the pipe private structure */
1852 1800 pp = tw->tw_pipe_private;
1853 1801
1854 1802 /* Stop the transfer timer */
1855 1803 ehci_stop_xfer_timer(ehcip, tw,
1856 1804 EHCI_REMOVE_XFER_ALWAYS);
1857 1805
1858 1806 ehci_deallocate_tw(ehcip, pp, tw);
1859 1807
1860 1808 mutex_exit(&ehcip->ehci_int_mutex);
1861 1809 }
1862 1810 }
1863 1811
1864 1812 /*
1865 1813 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1866 1814 * the handle for QTD pools.
1867 1815 */
1868 1816 if ((ehcip->ehci_dma_addr_bind_flag &
1869 1817 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1870 1818
1871 1819 rval = ddi_dma_unbind_handle(
1872 1820 ehcip->ehci_qtd_pool_dma_handle);
1873 1821
1874 1822 ASSERT(rval == DDI_SUCCESS);
1875 1823 }
1876 1824 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1877 1825 }
1878 1826
1879 1827 /* Free the QTD pool */
1880 1828 if (ehcip->ehci_qtd_pool_dma_handle) {
1881 1829 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1882 1830 }
1883 1831
1884 1832 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1885 1833 /*
1886 1834 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1887 1835 * the handle for QH pools.
1888 1836 */
1889 1837 if ((ehcip->ehci_dma_addr_bind_flag &
1890 1838 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1891 1839
1892 1840 rval = ddi_dma_unbind_handle(
1893 1841 ehcip->ehci_qh_pool_dma_handle);
1894 1842
1895 1843 ASSERT(rval == DDI_SUCCESS);
1896 1844 }
1897 1845
1898 1846 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1899 1847 }
1900 1848
1901 1849 /* Free the QH pool */
1902 1850 if (ehcip->ehci_qh_pool_dma_handle) {
1903 1851 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1904 1852 }
1905 1853
1906 1854 /* Free the Periodic frame list table (PFLT) area */
1907 1855 if (ehcip->ehci_periodic_frame_list_tablep &&
1908 1856 ehcip->ehci_pflt_mem_handle) {
1909 1857 /*
1910 1858 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1911 1859 * the handle for PFLT.
1912 1860 */
1913 1861 if ((ehcip->ehci_dma_addr_bind_flag &
1914 1862 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1915 1863
1916 1864 rval = ddi_dma_unbind_handle(
1917 1865 ehcip->ehci_pflt_dma_handle);
1918 1866
1919 1867 ASSERT(rval == DDI_SUCCESS);
1920 1868 }
1921 1869
1922 1870 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1923 1871 }
1924 1872
1925 1873 (void) ehci_isoc_cleanup(ehcip);
1926 1874
1927 1875 if (ehcip->ehci_pflt_dma_handle) {
1928 1876 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1929 1877 }
1930 1878
1931 1879 if (flags & EHCI_INTR) {
1932 1880 /* Destroy the mutex */
1933 1881 mutex_destroy(&ehcip->ehci_int_mutex);
1934 -
1935 - /* Destroy the async schedule advance condition variable */
1936 - cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1937 1882 }
1938 1883
1939 1884 /* clean up kstat structs */
1940 1885 ehci_destroy_stats(ehcip);
1941 1886
1942 1887 /* Free ehci hcdi ops */
1943 1888 if (ehcip->ehci_hcdi_ops) {
1944 1889 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1945 1890 }
1946 1891
1947 1892 if (flags & EHCI_ZALLOC) {
1948 1893
1949 1894 usb_free_log_hdl(ehcip->ehci_log_hdl);
1950 1895
1951 1896 /* Remove all properties that might have been created */
1952 1897 ddi_prop_remove_all(ehcip->ehci_dip);
1953 1898
1954 1899 /* Free the soft state */
1955 1900 ddi_soft_state_free(ehci_statep,
1956 1901 ddi_get_instance(ehcip->ehci_dip));
1957 1902 }
1958 1903
1959 1904 return (DDI_SUCCESS);
1960 1905 }
1961 1906
1962 1907
1963 1908 /*
1964 1909 * ehci_rem_intrs:
1965 1910 *
1966 1911 * Unregister FIXED or MSI interrupts
1967 1912 */
1968 1913 static void
1969 1914 ehci_rem_intrs(ehci_state_t *ehcip)
1970 1915 {
1971 1916 int i;
1972 1917
1973 1918 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1974 1919 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1975 1920
1976 1921 /* Disable all interrupts */
1977 1922 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1978 1923 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1979 1924 ehcip->ehci_intr_cnt);
1980 1925 } else {
1981 1926 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1982 1927 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1983 1928 }
1984 1929 }
1985 1930
1986 1931 /* Call ddi_intr_remove_handler() */
1987 1932 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1988 1933 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
1989 1934 (void) ddi_intr_free(ehcip->ehci_htable[i]);
1990 1935 }
1991 1936
1992 1937 kmem_free(ehcip->ehci_htable,
1993 1938 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
1994 1939 }
1995 1940
1996 1941
1997 1942 /*
1998 1943 * ehci_cpr_suspend
1999 1944 */
2000 1945 int
2001 1946 ehci_cpr_suspend(ehci_state_t *ehcip)
2002 1947 {
2003 1948 int i;
2004 1949
2005 1950 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2006 1951 "ehci_cpr_suspend:");
2007 1952
2008 1953 /* Call into the root hub and suspend it */
2009 1954 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2010 1955
2011 1956 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2012 1957 "ehci_cpr_suspend: root hub fails to suspend");
2013 1958
2014 1959 return (DDI_FAILURE);
2015 1960 }
2016 1961
2017 1962 /* Only root hub's intr pipe should be open at this time */
2018 1963 mutex_enter(&ehcip->ehci_int_mutex);
2019 1964
2020 1965 ASSERT(ehcip->ehci_open_pipe_count == 0);
2021 1966
2022 1967 /* Just wait till all resources are reclaimed */
2023 1968 i = 0;
2024 1969 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2025 1970 ehci_handle_endpoint_reclaimation(ehcip);
2026 1971 (void) ehci_wait_for_sof(ehcip);
2027 1972 }
2028 1973 ASSERT(ehcip->ehci_reclaim_list == NULL);
2029 1974
2030 1975 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2031 1976 "ehci_cpr_suspend: Disable HC QH list processing");
2032 1977
2033 1978 /* Disable all EHCI QH list processing */
2034 1979 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2035 1980 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2036 1981
2037 1982 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2038 1983 "ehci_cpr_suspend: Disable HC interrupts");
2039 1984
2040 1985 /* Disable all EHCI interrupts */
2041 1986 Set_OpReg(ehci_interrupt, 0);
2042 1987
2043 1988 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2044 1989 "ehci_cpr_suspend: Wait for the next SOF");
2045 1990
2046 1991 /* Wait for the next SOF */
2047 1992 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2048 1993
2049 1994 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2050 1995 "ehci_cpr_suspend: ehci host controller suspend failed");
2051 1996
2052 1997 mutex_exit(&ehcip->ehci_int_mutex);
2053 1998 return (DDI_FAILURE);
2054 1999 }
2055 2000
2056 2001 /*
2057 2002 * Stop the ehci host controller if no usb keyboard
2058 2003 * is connected, or if force_ehci_off is set.
2059 2004 */
2060 2005 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2061 2006 Set_OpReg(ehci_command,
2062 2007 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2063 2008
2064 2009 }
2065 2010
2066 2011 /* Set host controller soft state to suspend */
2067 2012 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2068 2013
2069 2014 mutex_exit(&ehcip->ehci_int_mutex);
2070 2015
2071 2016 return (DDI_SUCCESS);
2072 2017 }
2073 2018
2074 2019
2075 2020 /*
2076 2021 * ehci_cpr_resume
2077 2022 */
2078 2023 int
2079 2024 ehci_cpr_resume(ehci_state_t *ehcip)
2080 2025 {
2081 2026 mutex_enter(&ehcip->ehci_int_mutex);
2082 2027
2083 2028 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2084 2029 "ehci_cpr_resume: Restart the controller");
2085 2030
2086 2031 /* Cleanup ehci specific information across cpr */
2087 2032 ehci_cpr_cleanup(ehcip);
2088 2033
2089 2034 /* Restart the controller */
2090 2035 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2091 2036
2092 2037 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2093 2038 "ehci_cpr_resume: ehci host controller resume failed ");
2094 2039
2095 2040 mutex_exit(&ehcip->ehci_int_mutex);
2096 2041
2097 2042 return (DDI_FAILURE);
2098 2043 }
2099 2044
2100 2045 mutex_exit(&ehcip->ehci_int_mutex);
2101 2046
2102 2047 /* Now resume the root hub */
2103 2048 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2104 2049
2105 2050 return (DDI_FAILURE);
2106 2051 }
2107 2052
2108 2053 return (DDI_SUCCESS);
2109 2054 }
2110 2055
2111 2056
2112 2057 /*
2113 2058 * Bandwidth Allocation functions
2114 2059 */
2115 2060
2116 2061 /*
2117 2062 * ehci_allocate_bandwidth:
2118 2063 *
2119 2064 * Figure out whether or not this interval may be supported. Return the index
2120 2065 * into the lattice if it can be supported. Return allocation failure if it
2121 2066 * can not be supported.
2122 2067 */
2123 2068 int
2124 2069 ehci_allocate_bandwidth(
2125 2070 ehci_state_t *ehcip,
2126 2071 usba_pipe_handle_data_t *ph,
2127 2072 uint_t *pnode,
2128 2073 uchar_t *smask,
2129 2074 uchar_t *cmask)
2130 2075 {
2131 2076 int error = USB_SUCCESS;
2132 2077
2133 2078 /* This routine is protected by the ehci_int_mutex */
2134 2079 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2135 2080
2136 2081 /* Reset the pnode to the last checked pnode */
2137 2082 *pnode = 0;
2138 2083
2139 2084 /* Allocate high speed bandwidth */
2140 2085 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2141 2086 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2142 2087
2143 2088 return (error);
2144 2089 }
2145 2090
2146 2091 /*
2147 2092 * For low/full speed usb devices, allocate classic TT bandwidth
2148 2093 * in addition to high speed bandwidth.
2149 2094 */
2150 2095 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2151 2096
2152 2097 /* Allocate classic TT bandwidth */
2153 2098 if ((error = ehci_allocate_classic_tt_bandwidth(
2154 2099 ehcip, ph, *pnode)) != USB_SUCCESS) {
2155 2100
2156 2101 /* Deallocate high speed bandwidth */
2157 2102 ehci_deallocate_high_speed_bandwidth(
2158 2103 ehcip, ph, *pnode, *smask, *cmask);
2159 2104 }
2160 2105 }
2161 2106
2162 2107 return (error);
2163 2108 }
2164 2109
2165 2110
2166 2111 /*
2167 2112 * ehci_allocate_high_speed_bandwidth:
2168 2113 *
2169 2114 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2170 2115 * isochronous endpoints.
2171 2116 */
2172 2117 static int
2173 2118 ehci_allocate_high_speed_bandwidth(
2174 2119 ehci_state_t *ehcip,
2175 2120 usba_pipe_handle_data_t *ph,
2176 2121 uint_t *pnode,
2177 2122 uchar_t *smask,
2178 2123 uchar_t *cmask)
2179 2124 {
2180 2125 uint_t sbandwidth, cbandwidth;
2181 2126 int interval;
2182 2127 usb_ep_descr_t *endpoint = &ph->p_ep;
2183 2128 usba_device_t *child_ud;
2184 2129 usb_port_status_t port_status;
2185 2130 int error;
2186 2131
2187 2132 /* This routine is protected by the ehci_int_mutex */
2188 2133 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2189 2134
2190 2135 /* Get child's usba device structure */
2191 2136 child_ud = ph->p_usba_device;
2192 2137
2193 2138 mutex_enter(&child_ud->usb_mutex);
2194 2139
2195 2140 /* Get the current usb device's port status */
2196 2141 port_status = ph->p_usba_device->usb_port_status;
2197 2142
2198 2143 mutex_exit(&child_ud->usb_mutex);
2199 2144
2200 2145 /*
2201 2146 * Calculate the length in bytes of a transaction on this
2202 2147 * periodic endpoint. Return failure if maximum packet is
2203 2148 * zero.
2204 2149 */
2205 2150 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2206 2151 port_status, &sbandwidth, &cbandwidth);
2207 2152 if (error != USB_SUCCESS) {
2208 2153
2209 2154 return (error);
2210 2155 }
2211 2156
2212 2157 /*
2213 2158 * Adjust polling interval to be a power of 2.
2214 2159 * If this interval can't be supported, return
2215 2160 * allocation failure.
2216 2161 */
2217 2162 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2218 2163 if (interval == USB_FAILURE) {
2219 2164
2220 2165 return (USB_FAILURE);
2221 2166 }
2222 2167
2223 2168 if (port_status == USBA_HIGH_SPEED_DEV) {
2224 2169 /* Allocate bandwidth for high speed devices */
2225 2170 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2226 2171 USB_EP_ATTR_ISOCH) {
2227 2172 error = USB_SUCCESS;
2228 2173 } else {
2229 2174
2230 2175 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2231 2176 endpoint, sbandwidth, interval);
2232 2177 }
2233 2178
2234 2179 *cmask = 0x00;
2235 2180
2236 2181 } else {
2237 2182 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2238 2183 USB_EP_ATTR_INTR) {
2239 2184
2240 2185 /* Allocate bandwidth for low speed interrupt */
2241 2186 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2242 2187 smask, cmask, pnode, sbandwidth, cbandwidth,
2243 2188 interval);
2244 2189 } else {
2245 2190 if ((endpoint->bEndpointAddress &
2246 2191 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2247 2192
2248 2193 /* Allocate bandwidth for sitd in */
2249 2194 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2250 2195 smask, cmask, pnode, sbandwidth, cbandwidth,
2251 2196 interval);
2252 2197 } else {
2253 2198
2254 2199 /* Allocate bandwidth for sitd out */
2255 2200 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2256 2201 smask, pnode, sbandwidth, interval);
2257 2202 *cmask = 0x00;
2258 2203 }
2259 2204 }
2260 2205 }
2261 2206
2262 2207 if (error != USB_SUCCESS) {
2263 2208 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2264 2209 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2265 2210 "bandwidth value and cannot allocate bandwidth for a "
2266 2211 "given high-speed periodic endpoint");
2267 2212
2268 2213 return (USB_NO_BANDWIDTH);
2269 2214 }
2270 2215
2271 2216 return (error);
2272 2217 }
2273 2218
2274 2219
2275 2220 /*
2276 2221 * ehci_allocate_classic_tt_bandwidth:
2277 2222 *
2278 2223 * Allocate classic TT bandwidth for the low/full speed interrupt and
2279 2224 * isochronous endpoints.
2280 2225 */
2281 2226 static int
2282 2227 ehci_allocate_classic_tt_bandwidth(
2283 2228 ehci_state_t *ehcip,
2284 2229 usba_pipe_handle_data_t *ph,
2285 2230 uint_t pnode)
2286 2231 {
2287 2232 uint_t bandwidth, min;
2288 2233 uint_t height, leftmost, list;
2289 2234 usb_ep_descr_t *endpoint = &ph->p_ep;
2290 2235 usba_device_t *child_ud, *parent_ud;
2291 2236 usb_port_status_t port_status;
2292 2237 int i, interval;
2293 2238
2294 2239 /* This routine is protected by the ehci_int_mutex */
2295 2240 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2296 2241
2297 2242 /* Get child's usba device structure */
2298 2243 child_ud = ph->p_usba_device;
2299 2244
2300 2245 mutex_enter(&child_ud->usb_mutex);
2301 2246
2302 2247 /* Get the current usb device's port status */
2303 2248 port_status = child_ud->usb_port_status;
2304 2249
2305 2250 /* Get the parent high speed hub's usba device structure */
2306 2251 parent_ud = child_ud->usb_hs_hub_usba_dev;
2307 2252
2308 2253 mutex_exit(&child_ud->usb_mutex);
2309 2254
2310 2255 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2311 2256 "ehci_allocate_classic_tt_bandwidth: "
2312 2257 "child_ud 0x%p parent_ud 0x%p",
2313 2258 (void *)child_ud, (void *)parent_ud);
2314 2259
2315 2260 /*
2316 2261 * Calculate the length in bytes of a transaction on this
2317 2262 * periodic endpoint. Return failure if maximum packet is
2318 2263 * zero.
2319 2264 */
2320 2265 if (ehci_compute_classic_bandwidth(endpoint,
2321 2266 port_status, &bandwidth) != USB_SUCCESS) {
2322 2267
2323 2268 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2324 2269 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2325 2270 "with zero endpoint maximum packet size is not supported");
2326 2271
2327 2272 return (USB_NOT_SUPPORTED);
2328 2273 }
2329 2274
2330 2275 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2331 2276 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2332 2277
2333 2278 mutex_enter(&parent_ud->usb_mutex);
2334 2279
2335 2280 /*
2336 2281 * If the length in bytes plus the allocated bandwidth exceeds
2337 2282 * the maximum, return bandwidth allocation failure.
2338 2283 */
2339 2284 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2340 2285 FS_PERIODIC_BANDWIDTH) {
2341 2286
2342 2287 mutex_exit(&parent_ud->usb_mutex);
2343 2288
2344 2289 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2345 2290 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2346 2291 "bandwidth value and cannot allocate bandwidth for a "
2347 2292 "given low/full speed periodic endpoint");
2348 2293
2349 2294 return (USB_NO_BANDWIDTH);
2350 2295 }
2351 2296
2352 2297 mutex_exit(&parent_ud->usb_mutex);
2353 2298
2354 2299 /* Adjust polling interval to be a power of 2 */
2355 2300 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2356 2301
2357 2302 /* Find the height in the tree */
2358 2303 height = ehci_lattice_height(interval);
2359 2304
2360 2305 /* Find the leftmost leaf in the subtree specified by the node. */
2361 2306 leftmost = ehci_leftmost_leaf(pnode, height);
2362 2307
2363 2308 mutex_enter(&parent_ud->usb_mutex);
2364 2309
2365 2310 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2366 2311 list = ehci_index[leftmost + i];
2367 2312
2368 2313 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2369 2314 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2370 2315
2371 2316 mutex_exit(&parent_ud->usb_mutex);
2372 2317
2373 2318 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2374 2319 "ehci_allocate_classic_tt_bandwidth: Reached "
2375 2320 "maximum bandwidth value and cannot allocate "
2376 2321 "bandwidth for low/full periodic endpoint");
2377 2322
2378 2323 return (USB_NO_BANDWIDTH);
2379 2324 }
2380 2325 }
2381 2326
2382 2327 /*
2383 2328 * All the leaves for this node must be updated with the bandwidth.
2384 2329 */
2385 2330 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2386 2331 list = ehci_index[leftmost + i];
2387 2332 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2388 2333 }
2389 2334
2390 2335 /* Find the leaf with the smallest allocated bandwidth */
2391 2336 min = parent_ud->usb_hs_hub_bandwidth[0];
2392 2337
2393 2338 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2394 2339 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2395 2340 min = parent_ud->usb_hs_hub_bandwidth[i];
2396 2341 }
2397 2342 }
2398 2343
2399 2344 /* Save the minimum for later use */
2400 2345 parent_ud->usb_hs_hub_min_bandwidth = min;
2401 2346
2402 2347 mutex_exit(&parent_ud->usb_mutex);
2403 2348
2404 2349 return (USB_SUCCESS);
2405 2350 }
2406 2351
2407 2352
2408 2353 /*
2409 2354 * ehci_deallocate_bandwidth:
2410 2355 *
2411 2356 * Deallocate bandwidth for the given node in the lattice and the length
2412 2357 * of transfer.
2413 2358 */
2414 2359 void
2415 2360 ehci_deallocate_bandwidth(
2416 2361 ehci_state_t *ehcip,
2417 2362 usba_pipe_handle_data_t *ph,
2418 2363 uint_t pnode,
2419 2364 uchar_t smask,
2420 2365 uchar_t cmask)
2421 2366 {
2422 2367 /* This routine is protected by the ehci_int_mutex */
2423 2368 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2424 2369
2425 2370 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2426 2371
2427 2372 /*
2428 2373 * For low/full speed usb devices, deallocate classic TT bandwidth
2429 2374 * in addition to high speed bandwidth.
2430 2375 */
2431 2376 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2432 2377
2433 2378 /* Deallocate classic TT bandwidth */
2434 2379 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2435 2380 }
2436 2381 }
2437 2382
2438 2383
2439 2384 /*
2440 2385 * ehci_deallocate_high_speed_bandwidth:
2441 2386 *
2442 2387 * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2443 2388 */
2444 2389 static void
2445 2390 ehci_deallocate_high_speed_bandwidth(
2446 2391 ehci_state_t *ehcip,
2447 2392 usba_pipe_handle_data_t *ph,
2448 2393 uint_t pnode,
2449 2394 uchar_t smask,
2450 2395 uchar_t cmask)
2451 2396 {
2452 2397 uint_t height, leftmost;
2453 2398 uint_t list_count;
2454 2399 uint_t sbandwidth, cbandwidth;
2455 2400 int interval;
2456 2401 usb_ep_descr_t *endpoint = &ph->p_ep;
2457 2402 usba_device_t *child_ud;
2458 2403 usb_port_status_t port_status;
2459 2404
2460 2405 /* This routine is protected by the ehci_int_mutex */
2461 2406 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2462 2407
2463 2408 /* Get child's usba device structure */
2464 2409 child_ud = ph->p_usba_device;
2465 2410
2466 2411 mutex_enter(&child_ud->usb_mutex);
2467 2412
2468 2413 /* Get the current usb device's port status */
2469 2414 port_status = ph->p_usba_device->usb_port_status;
2470 2415
2471 2416 mutex_exit(&child_ud->usb_mutex);
2472 2417
2473 2418 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2474 2419 port_status, &sbandwidth, &cbandwidth);
2475 2420
2476 2421 /* Adjust polling interval to be a power of 2 */
2477 2422 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2478 2423
2479 2424 /* Find the height in the tree */
2480 2425 height = ehci_lattice_height(interval);
2481 2426
2482 2427 /*
2483 2428 * Find the leftmost leaf in the subtree specified by the node
2484 2429 */
2485 2430 leftmost = ehci_leftmost_leaf(pnode, height);
2486 2431
2487 2432 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2488 2433
2489 2434 /* Delete the bandwidth from the appropriate lists */
2490 2435 if (port_status == USBA_HIGH_SPEED_DEV) {
2491 2436
2492 2437 ehci_update_bw_availability(ehcip, -sbandwidth,
2493 2438 leftmost, list_count, smask);
2494 2439 } else {
2495 2440 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2496 2441 USB_EP_ATTR_INTR) {
2497 2442
2498 2443 ehci_update_bw_availability(ehcip, -sbandwidth,
2499 2444 leftmost, list_count, smask);
2500 2445 ehci_update_bw_availability(ehcip, -cbandwidth,
2501 2446 leftmost, list_count, cmask);
2502 2447 } else {
2503 2448 if ((endpoint->bEndpointAddress &
2504 2449 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2505 2450
2506 2451 ehci_update_bw_availability(ehcip, -sbandwidth,
2507 2452 leftmost, list_count, smask);
2508 2453 ehci_update_bw_availability(ehcip,
2509 2454 -MAX_UFRAME_SITD_XFER, leftmost,
2510 2455 list_count, cmask);
2511 2456 } else {
2512 2457
2513 2458 ehci_update_bw_availability(ehcip,
2514 2459 -MAX_UFRAME_SITD_XFER, leftmost,
2515 2460 list_count, smask);
2516 2461 }
2517 2462 }
2518 2463 }
2519 2464 }
2520 2465
2521 2466 /*
2522 2467 * ehci_deallocate_classic_tt_bandwidth:
2523 2468 *
2524 2469 * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2525 2470 */
2526 2471 static void
2527 2472 ehci_deallocate_classic_tt_bandwidth(
2528 2473 ehci_state_t *ehcip,
2529 2474 usba_pipe_handle_data_t *ph,
2530 2475 uint_t pnode)
2531 2476 {
2532 2477 uint_t bandwidth, height, leftmost, list, min;
2533 2478 int i, interval;
2534 2479 usb_ep_descr_t *endpoint = &ph->p_ep;
2535 2480 usba_device_t *child_ud, *parent_ud;
2536 2481 usb_port_status_t port_status;
2537 2482
2538 2483 /* This routine is protected by the ehci_int_mutex */
2539 2484 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2540 2485
2541 2486 /* Get child's usba device structure */
2542 2487 child_ud = ph->p_usba_device;
2543 2488
2544 2489 mutex_enter(&child_ud->usb_mutex);
2545 2490
2546 2491 /* Get the current usb device's port status */
2547 2492 port_status = child_ud->usb_port_status;
2548 2493
2549 2494 /* Get the parent high speed hub's usba device structure */
2550 2495 parent_ud = child_ud->usb_hs_hub_usba_dev;
2551 2496
2552 2497 mutex_exit(&child_ud->usb_mutex);
2553 2498
2554 2499 /* Obtain the bandwidth */
2555 2500 (void) ehci_compute_classic_bandwidth(endpoint,
2556 2501 port_status, &bandwidth);
2557 2502
2558 2503 /* Adjust polling interval to be a power of 2 */
2559 2504 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2560 2505
2561 2506 /* Find the height in the tree */
2562 2507 height = ehci_lattice_height(interval);
2563 2508
2564 2509 /* Find the leftmost leaf in the subtree specified by the node */
2565 2510 leftmost = ehci_leftmost_leaf(pnode, height);
2566 2511
2567 2512 mutex_enter(&parent_ud->usb_mutex);
2568 2513
2569 2514 /* Delete the bandwidth from the appropriate lists */
2570 2515 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2571 2516 list = ehci_index[leftmost + i];
2572 2517 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2573 2518 }
2574 2519
2575 2520 /* Find the leaf with the smallest allocated bandwidth */
2576 2521 min = parent_ud->usb_hs_hub_bandwidth[0];
2577 2522
2578 2523 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2579 2524 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2580 2525 min = parent_ud->usb_hs_hub_bandwidth[i];
2581 2526 }
2582 2527 }
2583 2528
2584 2529 /* Save the minimum for later use */
2585 2530 parent_ud->usb_hs_hub_min_bandwidth = min;
2586 2531
2587 2532 mutex_exit(&parent_ud->usb_mutex);
2588 2533 }
2589 2534
2590 2535
2591 2536 /*
2592 2537 * ehci_compute_high_speed_bandwidth:
2593 2538 *
2594 2539 * Given a periodic endpoint (interrupt or isochronous) determine the total
2595 2540 * bandwidth for one transaction. The EHCI host controller traverses the
2596 2541 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2597 2542 * services an endpoint, only a single transaction attempt is made. The HC
2598 2543 * moves to the next Endpoint Descriptor after the first transaction attempt
2599 2544 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2600 2545 * Transfer Descriptor is inserted into the lattice, we will only count the
2601 2546 * number of bytes for one transaction.
2602 2547 *
2603 2548 * The following formulas are used for calculating the bandwidth, in
2604 2549 * bytes, of a single USB high speed transaction. The protocol
2605 2550 * overheads differ for each type of USB transfer; these
2606 2551 * formulas and protocol overheads are derived from section 5.11.3 of the
2607 2552 * USB 2.0 Specification.
2608 2553 *
2609 2554 * High-Speed:
2610 2555 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2611 2556 *
2612 2557 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2613 2558 *
2614 2559 * Protocol overhead + Split transaction overhead +
2615 2560 * ((MaxPktSz * 7)/6) + Host_Delay;
2616 2561 */
2617 2562 /* ARGSUSED */
2618 2563 static int
2619 2564 ehci_compute_high_speed_bandwidth(
2620 2565 ehci_state_t *ehcip,
2621 2566 usb_ep_descr_t *endpoint,
2622 2567 usb_port_status_t port_status,
2623 2568 uint_t *sbandwidth,
2624 2569 uint_t *cbandwidth)
2625 2570 {
2626 2571 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2627 2572
2628 2573 /* Return failure if endpoint maximum packet is zero */
2629 2574 if (maxpacketsize == 0) {
2630 2575 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2631 2576 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2632 2577 "with zero endpoint maximum packet size is not supported");
2633 2578
2634 2579 return (USB_NOT_SUPPORTED);
2635 2580 }
2636 2581
2637 2582 /* Add bit-stuffing overhead */
2638 2583 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2639 2584
2640 2585 /* Add Host Controller specific delay to required bandwidth */
2641 2586 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2642 2587
2643 2588 /* Add xfer specific protocol overheads */
2644 2589 if ((endpoint->bmAttributes &
2645 2590 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2646 2591 /* High speed interrupt transaction */
2647 2592 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2648 2593 } else {
2649 2594 /* Isochronous transaction */
2650 2595 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2651 2596 }
2652 2597
2653 2598 /*
2654 2599 * For low/full speed devices, add split transaction specific
2655 2600 * overheads.
2656 2601 */
2657 2602 if (port_status != USBA_HIGH_SPEED_DEV) {
2658 2603 /*
2659 2604 * Add start and complete split transaction
2660 2605 * tokens overheads.
2661 2606 */
2662 2607 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2663 2608 *sbandwidth += START_SPLIT_OVERHEAD;
2664 2609
2665 2610 /* Add data overhead depending on data direction */
2666 2611 if ((endpoint->bEndpointAddress &
2667 2612 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2668 2613 *cbandwidth += maxpacketsize;
2669 2614 } else {
2670 2615 if ((endpoint->bmAttributes &
2671 2616 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2672 2617 /* There are no complete splits for out */
2673 2618 *cbandwidth = 0;
2674 2619 }
2675 2620 *sbandwidth += maxpacketsize;
2676 2621 }
2677 2622 } else {
2678 2623 uint_t xactions;
2679 2624
2680 2625 /* Get the max transactions per microframe */
2681 2626 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2682 2627 USB_EP_MAX_XACTS_SHIFT) + 1;
2683 2628
2684 2629 /* High speed transaction */
2685 2630 *sbandwidth += maxpacketsize;
2686 2631
2687 2632 /* Calculate bandwidth per micro-frame */
2688 2633 *sbandwidth *= xactions;
2689 2634
2690 2635 *cbandwidth = 0;
2691 2636 }
2692 2637
2693 2638 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2694 2639 "ehci_allocate_high_speed_bandwidth: "
2695 2640 "Start split bandwidth %d Complete split bandwidth %d",
2696 2641 *sbandwidth, *cbandwidth);
2697 2642
2698 2643 return (USB_SUCCESS);
2699 2644 }
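
To make the high-speed formula above concrete (an editorial worked example, not from the source): a high-speed interrupt endpoint with a wMaxPacketSize of 64 and one transaction per micro-frame bit-stuffs to (64 * 7) / 6 = 74 bytes, so the routine reports sbandwidth = EHCI_HOST_CONTROLLER_DELAY + HS_NON_ISOC_PROTO_OVERHEAD + 74 bytes per micro-frame and cbandwidth = 0.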
2700 2645
2701 2646
2702 2647 /*
2703 2648 * ehci_compute_classic_bandwidth:
2704 2649 *
2705 2650 * Given a periodic endpoint (interrupt or isochronous) determine the total
2706 2651 * bandwidth for one transaction. The EHCI host controller traverses the
2707 2652 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2708 2653 * services an endpoint, only a single transaction attempt is made. The HC
2709 2654 * moves to the next Endpoint Descriptor after the first transaction attempt
2710 2655 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2711 2656 * Transfer Descriptor is inserted into the lattice, we will only count the
2712 2657 * number of bytes for one transaction.
2713 2658 *
2714 2659 * The following formulas are used for calculating the bandwidth, in
2715 2660 * bytes, of a single USB low/full speed transaction. The protocol
2716 2661 * overheads differ for each type of USB transfer; these
2717 2662 * formulas and protocol overheads are derived from section 5.11.3 of the
2718 2663 * USB 2.0 Specification.
2719 2664 *
2720 2665 * Low-Speed:
2721 2666 * Protocol overhead + Hub LS overhead +
2722 2667 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2723 2668 *
2724 2669 * Full-Speed:
2725 2670 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2726 2671 */
2727 2672 /* ARGSUSED */
2728 2673 static int
2729 2674 ehci_compute_classic_bandwidth(
2730 2675 usb_ep_descr_t *endpoint,
2731 2676 usb_port_status_t port_status,
2732 2677 uint_t *bandwidth)
2733 2678 {
2734 2679 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2735 2680
2736 2681 /*
2737 2682 * If endpoint maximum packet is zero, then return immediately.
2738 2683 */
2739 2684 if (maxpacketsize == 0) {
2740 2685
2741 2686 return (USB_NOT_SUPPORTED);
2742 2687 }
2743 2688
2744 2689 /* Add TT delay to required bandwidth */
2745 2690 *bandwidth = TT_DELAY;
2746 2691
2747 2692 /* Add bit-stuffing overhead */
2748 2693 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2749 2694
2750 2695 switch (port_status) {
2751 2696 case USBA_LOW_SPEED_DEV:
2752 2697 /* Low speed interrupt transaction */
2753 2698 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2754 2699 HUB_LOW_SPEED_PROTO_OVERHEAD +
2755 2700 (LOW_SPEED_CLOCK * maxpacketsize));
2756 2701 break;
2757 2702 case USBA_FULL_SPEED_DEV:
2758 2703 /* Full speed transaction */
2759 2704 *bandwidth += maxpacketsize;
2760 2705
2761 2706 /* Add xfer specific protocol overheads */
2762 2707 if ((endpoint->bmAttributes &
2763 2708 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2764 2709 /* Full speed interrupt transaction */
2765 2710 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2766 2711 } else {
2767 2712 /* Isochronous and input transaction */
2768 2713 if ((endpoint->bEndpointAddress &
2769 2714 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2770 2715 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2771 2716 } else {
2772 2717 /* Isochronous and output transaction */
2773 2718 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2774 2719 }
2775 2720 }
2776 2721 break;
2777 2722 }
2778 2723
2779 2724 return (USB_SUCCESS);
2780 2725 }
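
For illustration (an editorial worked example, not from the source): a full-speed interrupt endpoint with a wMaxPacketSize of 8 bit-stuffs to (8 * 7) / 6 = 9 bytes, so the routine reports bandwidth = TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + 9 bytes per frame.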
2781 2726
2782 2727
2783 2728 /*
2784 2729 * ehci_adjust_polling_interval:
2785 2730 *
2786 2731 * Adjust the polling interval according to the usb device speed.
2787 2732 */
2788 2733 /* ARGSUSED */
2789 2734 int
2790 2735 ehci_adjust_polling_interval(
2791 2736 ehci_state_t *ehcip,
2792 2737 usb_ep_descr_t *endpoint,
2793 2738 usb_port_status_t port_status)
2794 2739 {
2795 2740 uint_t interval;
2796 2741 int i = 0;
2797 2742
2798 2743 /* Get the polling interval */
2799 2744 interval = endpoint->bInterval;
2800 2745
2801 2746 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2802 2747 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2803 2748
2804 2749 /*
2805 2750 * According to the USB 2.0 Specification, a high-speed endpoint's
2806 2751 * polling interval is specified in terms of 125us micro-
2807 2752 * frames, whereas a full/low speed endpoint's polling interval is
2808 2753 * specified in milliseconds.
2809 2754 *
2810 2755 * A high speed interrupt/isochronous endpoint can specify a
2811 2756 * desired polling interval between 1 and 16 micro-frames,
2812 2757 * whereas full/low speed endpoints can specify between 1 and 255
2813 2758 * milliseconds.
2814 2759 */
2815 2760 switch (port_status) {
2816 2761 case USBA_LOW_SPEED_DEV:
2817 2762 /*
2818 2763 * Low speed endpoints are limited to specifying
2819 2764 * only 8ms to 255ms in this driver. If a device
2820 2765 * reports a polling interval that is less than 8ms,
2821 2766 * it will use 8 ms instead.
2822 2767 */
2823 2768 if (interval < LS_MIN_POLL_INTERVAL) {
2824 2769
2825 2770 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2826 2771 "Low speed endpoint's poll interval of %d ms "
2827 2772 "is below threshold. Rounding up to %d ms",
2828 2773 interval, LS_MIN_POLL_INTERVAL);
2829 2774
2830 2775 interval = LS_MIN_POLL_INTERVAL;
2831 2776 }
2832 2777
2833 2778 /*
2834 2779 * Return an error if the polling interval is greater
2835 2780 * than 255ms.
2836 2781 */
2837 2782 if (interval > LS_MAX_POLL_INTERVAL) {
2838 2783
2839 2784 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2840 2785 "Low speed endpoint's poll interval is "
2841 2786 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2842 2787
2843 2788 return (USB_FAILURE);
2844 2789 }
2845 2790 break;
2846 2791
2847 2792 case USBA_FULL_SPEED_DEV:
2848 2793 /*
2849 2794 * Return an error if the polling interval is less
2850 2795 * than 1ms or greater than 255ms.
2851 2796 */
2852 2797 if ((interval < FS_MIN_POLL_INTERVAL) &&
2853 2798 (interval > FS_MAX_POLL_INTERVAL)) {
2854 2799
2855 2800 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2856 2801 "Full speed endpoint's poll interval must "
2857 2802 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2858 2803 FS_MAX_POLL_INTERVAL);
2859 2804
2860 2805 return (USB_FAILURE);
2861 2806 }
2862 2807 break;
2863 2808 case USBA_HIGH_SPEED_DEV:
2864 2809 /*
2865 2810 * Return an error if the polling interval is less than 1
2866 2811 * or greater than 16. Convert this value to 125us
2867 2812 * units using 2^(bInterval - 1). Refer to usb 2.0 spec
2868 2813 * page 51 for details.
2869 2814 */
2870 2815 if ((interval < HS_MIN_POLL_INTERVAL) &&
2871 2816 (interval > HS_MAX_POLL_INTERVAL)) {
2872 2817
2873 2818 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2874 2819 "High speed endpoint's poll interval "
2875 2820 "must be between %d and %d units",
2876 2821 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2877 2822
2878 2823 return (USB_FAILURE);
2879 2824 }
2880 2825
2881 2826 /* Adjust high speed device polling interval */
2882 2827 interval =
2883 2828 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2884 2829
2885 2830 break;
2886 2831 }
2887 2832
2888 2833 /*
2889 2834 * If the polling interval is greater than 32ms,
2890 2835 * clamp it to 32ms.
2891 2836 */
2892 2837 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2893 2838 interval = EHCI_NUM_INTR_QH_LISTS;
2894 2839 }
2895 2840
2896 2841 /*
2897 2842 * Find the largest power of 2 that is less
2898 2843 * than or equal to the interval.
2899 2844 */
2900 2845 while ((ehci_pow_2(i)) <= interval) {
2901 2846 i++;
2902 2847 }
2903 2848
2904 2849 return (ehci_pow_2((i - 1)));
2905 2850 }
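
As a worked example of the rounding above (editorial, not from the source): a full-speed endpoint reporting bInterval = 10 passes the range checks, is below the 32ms cap, and the final loop finds the largest power of two not exceeding 10, so the routine returns 8 and the endpoint is polled every 8ms.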
2906 2851
2907 2852
2908 2853 /*
2909 2854 * ehci_adjust_high_speed_polling_interval:
2910 2855 */
2911 2856 /* ARGSUSED */
2912 2857 static int
2913 2858 ehci_adjust_high_speed_polling_interval(
2914 2859 ehci_state_t *ehcip,
2915 2860 usb_ep_descr_t *endpoint)
2916 2861 {
2917 2862 uint_t interval;
2918 2863
2919 2864 /* Get the polling interval */
2920 2865 interval = ehci_pow_2(endpoint->bInterval - 1);
2921 2866
2922 2867 /*
2923 2868 * Convert the polling interval from micro-frames
2924 2869 * (125us units) to milliseconds.
2925 2870 */
2926 2871 if (interval <= EHCI_MAX_UFRAMES) {
2927 2872 interval = 1;
2928 2873 } else {
2929 2874 interval = interval/EHCI_MAX_UFRAMES;
2930 2875 }
2931 2876
2932 2877 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2933 2878 "ehci_adjust_high_speed_polling_interval: "
2934 2879 "High speed adjusted interval 0x%x", interval);
2935 2880
2936 2881 return (interval);
2937 2882 }
2938 2883
2939 2884
2940 2885 /*
2941 2886 * ehci_lattice_height:
2942 2887 *
2943 2888 * Given the requested bandwidth, find the height in the tree at which the
2944 2889 * nodes for this bandwidth fall. The height is measured as the number of
2945 2890 * nodes from the leaf to the level specified by the interval. The root of the
2946 2891 * tree is at height TREE_HEIGHT.
2947 2892 */
2948 2893 static uint_t
2949 2894 ehci_lattice_height(uint_t interval)
2950 2895 {
2951 2896 return (TREE_HEIGHT - (ehci_log_2(interval)));
2952 2897 }
2953 2898
2954 2899
2955 2900 /*
2956 2901 * ehci_lattice_parent:
2957 2902 *
2958 2903 * Given a node in the lattice, find the index of the parent node
2959 2904 */
2960 2905 static uint_t
2961 2906 ehci_lattice_parent(uint_t node)
2962 2907 {
2963 2908 if ((node % 2) == 0) {
2964 2909
2965 2910 return ((node/2) - 1);
2966 2911 } else {
2967 2912
2968 2913 return ((node + 1)/2 - 1);
2969 2914 }
2970 2915 }
2971 2916
2972 2917
2973 2918 /*
2974 2919 * ehci_find_periodic_node:
2975 2920 *
2976 2921 * Based on the "real" array leaf node and interval, get the periodic node.
2977 2922 */
2978 2923 static uint_t
2979 -ehci_find_periodic_node(uint_t leaf, int interval) {
2924 +ehci_find_periodic_node(uint_t leaf, int interval)
2925 +{
2980 2926 uint_t lattice_leaf;
2981 2927 uint_t height = ehci_lattice_height(interval);
2982 2928 uint_t pnode;
2983 2929 int i;
2984 2930
2985 2931 /* Get the leaf number in the lattice */
2986 2932 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
2987 2933
2988 2934 /* Get the node in the lattice based on the height and leaf */
2989 2935 pnode = lattice_leaf;
2990 2936 for (i = 0; i < height; i++) {
2991 2937 pnode = ehci_lattice_parent(pnode);
2992 2938 }
2993 2939
2994 2940 return (pnode);
2995 2941 }
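
A quick editorial example of the climb performed above (not from the source, and assuming TREE_HEIGHT is 5 for the 32-list lattice): an interval of 8 gives a height of 5 - log2(8) = 2; for array leaf 12, the lattice leaf is 12 + EHCI_NUM_INTR_QH_LISTS - 1 = 43, ehci_lattice_parent(43) = 21 and ehci_lattice_parent(21) = 10, so the periodic node returned is 10.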
2996 2942
2997 2943
2998 2944 /*
2999 2945 * ehci_leftmost_leaf:
3000 2946 *
3001 2947 * Find the leftmost leaf in the subtree specified by the node. Height refers
3002 2948 * to number of nodes from the bottom of the tree to the node, including the
3003 2949 * node.
3004 2950 *
3005 2951 * The formula for a zero based tree is:
3006 2952 * 2^H * Node + 2^H - 1
3007 2953 * The leaf of the tree is an array, convert the number for the array.
3008 2954 * Subtract the size of nodes not in the array
3009 2955 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3010 2956 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3011 2957 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3012 2958 * 0
3013 2959 * 1 2
3014 2960 * 0 1 2 3
3015 2961 */
3016 2962 static uint_t
3017 2963 ehci_leftmost_leaf(
3018 2964 uint_t node,
3019 2965 uint_t height)
3020 2966 {
3021 2967 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3022 2968 }
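
Continuing the editorial example above: applying the zero-based formula to node 10 at height 2 gives 2^2 * (10 + 1) - EHCI_NUM_INTR_QH_LISTS = 44 - 32 = 12, i.e. array leaf 12 is the leftmost leaf of that subtree, consistent with the climb shown after ehci_find_periodic_node.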
3023 2969
3024 2970
3025 2971 /*
3026 2972 * ehci_pow_2:
3027 2973 *
3028 2974 * Compute 2 to the power
3029 2975 */
3030 2976 static uint_t
3031 2977 ehci_pow_2(uint_t x)
3032 2978 {
3033 2979 if (x == 0) {
3034 2980
3035 2981 return (1);
3036 2982 } else {
3037 2983
3038 2984 return (2 << (x - 1));
3039 2985 }
3040 2986 }
3041 2987
3042 2988
3043 2989 /*
3044 2990 * ehci_log_2:
3045 2991 *
3046 2992 * Compute log base 2 of x
3047 2993 */
3048 2994 static uint_t
3049 2995 ehci_log_2(uint_t x)
3050 2996 {
3051 2997 int i = 0;
3052 2998
3053 2999 while (x != 1) {
3054 3000 x = x >> 1;
3055 3001 i++;
3056 3002 }
3057 3003
3058 3004 return (i);
3059 3005 }
3060 3006
3061 3007
3062 3008 /*
3063 3009 * ehci_find_bestfit_hs_mask:
3064 3010 *
3065 3011 * Find the smask and cmask in the bandwidth allocation, and update the
3066 3012 * bandwidth allocation.
3067 3013 */
3068 3014 static int
3069 3015 ehci_find_bestfit_hs_mask(
3070 3016 ehci_state_t *ehcip,
3071 3017 uchar_t *smask,
3072 3018 uint_t *pnode,
3073 3019 usb_ep_descr_t *endpoint,
3074 3020 uint_t bandwidth,
3075 3021 int interval)
3076 3022 {
3077 3023 int i;
3078 3024 uint_t elements, index;
3079 3025 int array_leaf, best_array_leaf;
3080 3026 uint_t node_bandwidth, best_node_bandwidth;
3081 3027 uint_t leaf_count;
3082 3028 uchar_t bw_mask;
3083 3029 uchar_t best_smask;
3084 3030
3085 3031 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3086 3032 "ehci_find_bestfit_hs_mask: ");
3087 3033
3088 3034 /* Get all the valid smasks */
3089 3035 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3090 3036 case EHCI_INTR_1US_POLL:
3091 3037 index = EHCI_1US_MASK_INDEX;
3092 3038 elements = EHCI_INTR_1US_POLL;
3093 3039 break;
3094 3040 case EHCI_INTR_2US_POLL:
3095 3041 index = EHCI_2US_MASK_INDEX;
3096 3042 elements = EHCI_INTR_2US_POLL;
3097 3043 break;
3098 3044 case EHCI_INTR_4US_POLL:
3099 3045 index = EHCI_4US_MASK_INDEX;
3100 3046 elements = EHCI_INTR_4US_POLL;
3101 3047 break;
3102 3048 case EHCI_INTR_XUS_POLL:
3103 3049 default:
3104 3050 index = EHCI_XUS_MASK_INDEX;
3105 3051 elements = EHCI_INTR_XUS_POLL;
3106 3052 break;
3107 3053 }
3108 3054
3109 3055 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3110 3056
3111 3057 /*
3112 3058 * Because of the way the leaves are set up, we will automatically
3113 3059 * hit the leftmost leaf of every possible node with this interval.
3114 3060 */
3115 3061 best_smask = 0x00;
3116 3062 best_node_bandwidth = 0;
3117 3063 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3118 3064 /* Find the bandwidth mask */
3119 3065 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3120 3066 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3121 3067
3122 3068 /*
3123 3069 * If this node cannot support our requirements skip to the
3124 3070 * next leaf.
3125 3071 */
3126 3072 if (bw_mask == 0x00) {
3127 3073 continue;
3128 3074 }
3129 3075
3130 3076 /*
3131 3077 * Now make sure our bandwidth requirements can be
3132 3078 * satisfied with one of smasks in this node.
3133 3079 */
3134 3080 *smask = 0x00;
3135 3081 for (i = index; i < (index + elements); i++) {
3136 3082 /* Check the start split mask value */
3137 3083 if (ehci_start_split_mask[index] & bw_mask) {
3138 3084 *smask = ehci_start_split_mask[index];
3139 3085 break;
3140 3086 }
3141 3087 }
3142 3088
3143 3089 /*
3144 3090 * If an appropriate smask is found save the information if:
3145 3091 * o best_smask has not been found yet.
3146 3092 * - or -
3147 3093 * o This is the node with the least amount of bandwidth
3148 3094 */
3149 3095 if ((*smask != 0x00) &&
3150 3096 ((best_smask == 0x00) ||
3151 3097 (best_node_bandwidth > node_bandwidth))) {
3152 3098
3153 3099 best_node_bandwidth = node_bandwidth;
3154 3100 best_array_leaf = array_leaf;
3155 3101 best_smask = *smask;
3156 3102 }
3157 3103 }
3158 3104
3159 3105 /*
3160 3106 * If we find a node that can handle the bandwidth, populate the
3161 3107 * appropriate variables and return success.
3162 3108 */
3163 3109 if (best_smask) {
3164 3110 *smask = best_smask;
3165 3111 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3166 3112 interval);
3167 3113 ehci_update_bw_availability(ehcip, bandwidth,
3168 3114 ehci_index[best_array_leaf], leaf_count, best_smask);
3169 3115
3170 3116 return (USB_SUCCESS);
3171 3117 }
3172 3118
3173 3119 return (USB_FAILURE);
3174 3120 }
3175 3121
3176 3122
3177 3123 /*
3178 3124 * ehci_find_bestfit_ls_intr_mask:
3179 3125 *
3180 3126 * Find the smask and cmask in the bandwidth allocation.
3181 3127 */
3182 3128 static int
3183 3129 ehci_find_bestfit_ls_intr_mask(
3184 3130 ehci_state_t *ehcip,
3185 3131 uchar_t *smask,
3186 3132 uchar_t *cmask,
3187 3133 uint_t *pnode,
3188 3134 uint_t sbandwidth,
3189 3135 uint_t cbandwidth,
3190 3136 int interval)
3191 3137 {
3192 3138 int i;
3193 3139 uint_t elements, index;
3194 3140 int array_leaf, best_array_leaf;
3195 3141 uint_t node_sbandwidth, node_cbandwidth;
3196 3142 uint_t best_node_bandwidth;
3197 3143 uint_t leaf_count;
3198 3144 uchar_t bw_smask, bw_cmask;
3199 3145 uchar_t best_smask, best_cmask;
3200 3146
3201 3147 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3202 3148 "ehci_find_bestfit_ls_intr_mask: ");
3203 3149
3204 3150 /* For low and full speed devices */
3205 3151 index = EHCI_XUS_MASK_INDEX;
3206 3152 elements = EHCI_INTR_4MS_POLL;
3207 3153
3208 3154 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3209 3155
3210 3156 /*
3211 3157 * Because of the way the leaves are set up, we will automatically
3212 3158 * hit the leftmost leaf of every possible node with this interval.
3213 3159 */
3214 3160 best_smask = 0x00;
3215 3161 best_node_bandwidth = 0;
3216 3162 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3217 3163 /* Find the bandwidth mask */
3218 3164 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3219 3165 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3220 3166 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3221 3167 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3222 3168
3223 3169 /*
3224 3170 * If this node cannot support our requirements skip to the
3225 3171 * next leaf.
3226 3172 */
3227 3173 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3228 3174 continue;
3229 3175 }
3230 3176
3231 3177 /*
3232 3178 * Now make sure our bandwidth requirements can be
3233 3179 * satisfied with one of smasks in this node.
3234 3180 */
3235 3181 *smask = 0x00;
3236 3182 *cmask = 0x00;
3237 3183 for (i = index; i < (index + elements); i++) {
3238 3184 /* Check the start split mask value */
3239 3185 if ((ehci_start_split_mask[i] & bw_smask) &&
3240 3186 (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3241 3187 *smask = ehci_start_split_mask[i];
3242 3188 *cmask = ehci_intr_complete_split_mask[i];
3243 3189 break;
3244 3190 }
3245 3191 }
3246 3192
3247 3193 /*
3248 3194 * If an appropriate smask is found save the information if:
3249 3195 * o best_smask has not been found yet.
3250 3196 * - or -
3251 3197 * o This is the node with the least amount of bandwidth
3252 3198 */
3253 3199 if ((*smask != 0x00) &&
3254 3200 ((best_smask == 0x00) ||
3255 3201 (best_node_bandwidth >
3256 3202 (node_sbandwidth + node_cbandwidth)))) {
3257 3203 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3258 3204 best_array_leaf = array_leaf;
3259 3205 best_smask = *smask;
3260 3206 best_cmask = *cmask;
3261 3207 }
3262 3208 }
3263 3209
3264 3210 /*
3265 3211 * If we find a node that can handle the bandwidth, populate
3266 3212 * the appropriate variables and return success.
3267 3213 */
3268 3214 if (best_smask) {
3269 3215 *smask = best_smask;
3270 3216 *cmask = best_cmask;
3271 3217 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3272 3218 interval);
3273 3219 ehci_update_bw_availability(ehcip, sbandwidth,
3274 3220 ehci_index[best_array_leaf], leaf_count, best_smask);
3275 3221 ehci_update_bw_availability(ehcip, cbandwidth,
3276 3222 ehci_index[best_array_leaf], leaf_count, best_cmask);
3277 3223
3278 3224 return (USB_SUCCESS);
3279 3225 }
3280 3226
3281 3227 return (USB_FAILURE);
3282 3228 }
3283 3229
3284 3230
3285 3231 /*
3286 3232 * ehci_find_bestfit_sitd_in_mask:
3287 3233 *
3288 3234 * Find the smask and cmask in the bandwidth allocation.
3289 3235 */
3290 3236 static int
3291 3237 ehci_find_bestfit_sitd_in_mask(
3292 3238 ehci_state_t *ehcip,
3293 3239 uchar_t *smask,
3294 3240 uchar_t *cmask,
3295 3241 uint_t *pnode,
3296 3242 uint_t sbandwidth,
3297 3243 uint_t cbandwidth,
3298 3244 int interval)
3299 3245 {
3300 3246 int i, uFrames, found;
3301 3247 int array_leaf, best_array_leaf;
3302 3248 uint_t node_sbandwidth, node_cbandwidth;
3303 3249 uint_t best_node_bandwidth;
3304 3250 uint_t leaf_count;
3305 3251 uchar_t bw_smask, bw_cmask;
3306 3252 uchar_t best_smask, best_cmask;
3307 3253
3308 3254 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3309 3255 "ehci_find_bestfit_sitd_in_mask: ");
3310 3256
3311 3257 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3312 3258
3313 3259 /*
3314 3260 * Because of the way the leaves are set up, we will automatically
3315 3261 * hit the leftmost leaf of every possible node with this interval.
3316 3262 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3317 3263 */
3318 3264 /*
3319 3265 * Need to add an additional 2 uFrames, if the "L"ast
3320 3266 * complete split is before uFrame 6. See section
3321 3267 * 11.8.4 in USB 2.0 Spec. Currently we do not support
3322 3268 * the "Back Ptr", which means we support one IN of
3323 3269 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3324 3270 */
3325 3271 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3326 3272 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3327 3273 uFrames++;
3328 3274 }
3329 3275 if (uFrames > 6) {
3330 3276
3331 3277 return (USB_FAILURE);
3332 3278 }
3333 3279 *smask = 0x1;
3334 3280 *cmask = 0x00;
3335 3281 for (i = 0; i < uFrames; i++) {
3336 3282 *cmask = *cmask << 1;
3337 3283 *cmask |= 0x1;
3338 3284 }
3339 3285 /* cmask must start 2 uFrames after the smask */
3340 3286 *cmask = *cmask << 2;
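/*
 * A worked example of the mask construction above, assuming
 * MAX_UFRAME_SITD_XFER is 188 bytes (its usual per-uFrame
 * split-isoch value; the number is an assumption here): for
 * cbandwidth = 200, uFrames becomes 200/188 + 2 = 3, plus 1 for
 * the 12-byte remainder, so 4. The loop builds cmask = 0b00001111
 * and the shift by 2 yields cmask = 0b00111100 (complete splits
 * in uFrames 2-5), with smask = 0b00000001 (start split in
 * uFrame 0), before the placement loop below slides both masks
 * looking for a position that lines up with the available uFrames.
 */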
3341 3287
3342 3288 found = 0;
3343 3289 best_smask = 0x00;
3344 3290 best_node_bandwidth = 0;
3345 3291 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3346 3292 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3347 3293 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3348 3294 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3349 3295 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3350 3296 &bw_cmask);
3351 3297
3352 3298 /*
3353 3299 * If this node cannot support our requirements skip to the
3354 3300 * next leaf.
3355 3301 */
3356 3302 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3357 3303 continue;
3358 3304 }
3359 3305
3360 3306 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3361 3307 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3362 3308 found = 1;
3363 3309 break;
3364 3310 }
3365 3311 *smask = *smask << 1;
3366 3312 *cmask = *cmask << 1;
3367 3313 }
3368 3314
3369 3315 /*
3370 3316 * If an appropriate smask is found save the information if:
3371 3317 * o best_smask has not been found yet.
3372 3318 * - or -
3373 3319 * o This is the node with the least amount of bandwidth
3374 3320 */
3375 3321 if (found &&
3376 3322 ((best_smask == 0x00) ||
3377 3323 (best_node_bandwidth >
3378 3324 (node_sbandwidth + node_cbandwidth)))) {
3379 3325 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3380 3326 best_array_leaf = array_leaf;
3381 3327 best_smask = *smask;
3382 3328 best_cmask = *cmask;
3383 3329 }
3384 3330 }
3385 3331
3386 3332 /*
3387 3333 * If we find a node that can handle the bandwidth, populate
3388 3334 * the appropriate variables and return success.
3389 3335 */
3390 3336 if (best_smask) {
3391 3337 *smask = best_smask;
3392 3338 *cmask = best_cmask;
3393 3339 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3394 3340 interval);
3395 3341 ehci_update_bw_availability(ehcip, sbandwidth,
3396 3342 ehci_index[best_array_leaf], leaf_count, best_smask);
3397 3343 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3398 3344 ehci_index[best_array_leaf], leaf_count, best_cmask);
3399 3345
3400 3346 return (USB_SUCCESS);
3401 3347 }
3402 3348
3403 3349 return (USB_FAILURE);
3404 3350 }
3405 3351
3406 3352
3407 3353 /*
3408 3354 * ehci_find_bestfit_sitd_out_mask:
3409 3355 *
3410 3356 * Find the smask in the bandwidth allocation.
3411 3357 */
3412 3358 static int
3413 3359 ehci_find_bestfit_sitd_out_mask(
3414 3360 ehci_state_t *ehcip,
3415 3361 uchar_t *smask,
3416 3362 uint_t *pnode,
3417 3363 uint_t sbandwidth,
3418 3364 int interval)
3419 3365 {
3420 3366 int i, uFrames, found;
3421 3367 int array_leaf, best_array_leaf;
3422 3368 uint_t node_sbandwidth;
3423 3369 uint_t best_node_bandwidth;
3424 3370 uint_t leaf_count;
3425 3371 uchar_t bw_smask;
3426 3372 uchar_t best_smask;
3427 3373
3428 3374 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3429 3375 "ehci_find_bestfit_sitd_out_mask: ");
3430 3376
3431 3377 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3432 3378
3433 3379 /*
3434 3380 * Because of the way the leaves are set up, we will automatically
3435 3381 * hit the leftmost leaf of every possible node with this interval.
3436 3382 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3437 3383 */
3438 3384 *smask = 0x00;
3439 3385 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3440 3386 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3441 3387 uFrames++;
3442 3388 }
3443 3389 for (i = 0; i < uFrames; i++) {
3444 3390 *smask = *smask << 1;
3445 3391 *smask |= 0x1;
3446 3392 }
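/*
 * For example, again assuming MAX_UFRAME_SITD_XFER is 188 bytes:
 * sbandwidth = 400 gives uFrames = 400/188 = 2, plus 1 for the
 * 24-byte remainder, so 3, and the loop above builds
 * smask = 0b00000111 (start splits in uFrames 0-2). The search
 * loop below then slides this window right one uFrame at a time,
 * looking for a position that lines up with the available uFrames
 * in bw_smask; a start split may not land on the 8th uFrame.
 */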
3447 3393
3448 3394 found = 0;
3449 3395 best_smask = 0x00;
3450 3396 best_node_bandwidth = 0;
3451 3397 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3452 3398 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3453 3399 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3454 3400 &bw_smask);
3455 3401
3456 3402 /*
3457 3403 * If this node cannot support our requirements skip to the
3458 3404 * next leaf.
3459 3405 */
3460 3406 if (bw_smask == 0x00) {
3461 3407 continue;
3462 3408 }
3463 3409
3464 3410 /* You cannot have a start split on the 8th uFrame */
3465 3411 for (i = 0; (*smask & 0x80) == 0; i++) {
3466 3412 if (*smask & bw_smask) {
3467 3413 found = 1;
3468 3414 break;
3469 3415 }
3470 3416 *smask = *smask << 1;
3471 3417 }
3472 3418
3473 3419 /*
3474 3420 * If an appropriate smask is found save the information if:
3475 3421 * o best_smask has not been found yet.
3476 3422 * - or -
3477 3423 * o This is the node with the least amount of bandwidth
3478 3424 */
3479 3425 if (found &&
3480 3426 ((best_smask == 0x00) ||
3481 3427 (best_node_bandwidth > node_sbandwidth))) {
3482 3428 best_node_bandwidth = node_sbandwidth;
3483 3429 best_array_leaf = array_leaf;
3484 3430 best_smask = *smask;
3485 3431 }
3486 3432 }
3487 3433
3488 3434 /*
3489 3435 * If we find a node that can handle the bandwidth, populate
3490 3436 * the appropriate variables and return success.
3491 3437 */
3492 3438 if (best_smask) {
3493 3439 *smask = best_smask;
3494 3440 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3495 3441 interval);
3496 3442 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3497 3443 ehci_index[best_array_leaf], leaf_count, best_smask);
3498 3444
3499 3445 return (USB_SUCCESS);
3500 3446 }
3501 3447
3502 3448 return (USB_FAILURE);
3503 3449 }
3504 3450
3505 3451
3506 3452 /*
3507 3453 * ehci_calculate_bw_availability_mask:
3508 3454 *
3509 3455 * Returns the "total bandwidth used" in this node.
3510 3456 * Populates bw_mask with the uFrames that can support the bandwidth.
3511 3457 *
3512 3458 * If none of the uFrames can support this bandwidth, then bw_mask
3513 3459 * is set to 0x00 and the "total bandwidth used" will be invalid.
3514 3460 */
3515 3461 static uint_t
3516 3462 ehci_calculate_bw_availability_mask(
3517 3463 ehci_state_t *ehcip,
3518 3464 uint_t bandwidth,
3519 3465 int leaf,
3520 3466 int leaf_count,
3521 3467 uchar_t *bw_mask)
3522 3468 {
3523 3469 int i, j;
3524 3470 uchar_t bw_uframe;
3525 3471 int uframe_total;
3526 3472 ehci_frame_bandwidth_t *fbp;
3527 3473 uint_t total_bandwidth = 0;
3528 3474
3529 3475 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3530 3476 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3531 3477 leaf, leaf_count);
3532 3478
3533 3479 /* Start by saying all uFrames are available */
3534 3480 *bw_mask = 0xFF;
3535 3481
3536 3482 for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3537 3483 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3538 3484
3539 3485 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3540 3486
3541 3487 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3542 3488 /*
3543 3489 * If the uFrame in bw_mask is available check to see if
3544 3490 * it can support the additional bandwidth.
3545 3491 */
3546 3492 bw_uframe = (*bw_mask & (0x1 << j));
3547 3493 uframe_total =
3548 3494 fbp->ehci_micro_frame_bandwidth[j] +
3549 3495 bandwidth;
3550 3496 if ((bw_uframe) &&
3551 3497 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3552 3498 *bw_mask = *bw_mask & ~bw_uframe;
3553 3499 }
3554 3500 }
3555 3501 }
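/*
 * To illustrate the loop above: if uFrame 3 of any frame under
 * this leaf already has enough bandwidth allocated that adding
 * "bandwidth" would exceed HS_PERIODIC_BANDWIDTH, bit 3 is
 * cleared from bw_mask for the whole node. A bit survives only
 * if every frame covered by this leaf can absorb the additional
 * bandwidth in that uFrame.
 */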
3556 3502
3557 3503 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3558 3504 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3559 3505 *bw_mask);
3560 3506
3561 3507 return (total_bandwidth);
3562 3508 }
3563 3509
3564 3510
3565 3511 /*
3566 3512 * ehci_update_bw_availability:
3567 3513 *
3568 3514 * The leftmost leaf needs to be in terms of array position and
3569 3515 * not the actual lattice position.
3570 3516 */
3571 3517 static void
3572 3518 ehci_update_bw_availability(
3573 3519 ehci_state_t *ehcip,
3574 3520 int bandwidth,
3575 3521 int leftmost_leaf,
3576 3522 int leaf_count,
3577 3523 uchar_t mask)
3578 3524 {
3579 3525 int i, j;
3580 3526 ehci_frame_bandwidth_t *fbp;
3581 3527 int uFrame_bandwidth[8];
3582 3528
3583 3529 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3584 3530 "ehci_update_bw_availability: "
3585 3531 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3586 3532 leftmost_leaf, leaf_count, bandwidth, mask);
3587 3533
3588 3534 ASSERT(leftmost_leaf < 32);
3589 3535 ASSERT(leftmost_leaf >= 0);
3590 3536
3591 3537 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3592 3538 if (mask & 0x1) {
3593 3539 uFrame_bandwidth[j] = bandwidth;
3594 3540 } else {
3595 3541 uFrame_bandwidth[j] = 0;
3596 3542 }
3597 3543
3598 3544 mask = mask >> 1;
3599 3545 }
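/*
 * For instance, mask = 0x3C with bandwidth B expands to
 * uFrame_bandwidth[] = {0, 0, B, B, B, B, 0, 0}: only the
 * uFrames whose bits are set in the mask are charged, and the
 * loop below applies that charge to each of the leaf_count
 * frames under the leftmost leaf.
 */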
3600 3546
3601 3547 /* Update all the affected leaves with the bandwidth */
3602 3548 for (i = 0; i < leaf_count; i++) {
3603 3549 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3604 3550
3605 3551 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3606 3552 fbp->ehci_micro_frame_bandwidth[j] +=
3607 3553 uFrame_bandwidth[j];
3608 3554 fbp->ehci_allocated_frame_bandwidth +=
3609 3555 uFrame_bandwidth[j];
3610 3556 }
3611 3557 }
3612 3558 }
3613 3559
3614 3560 /*
3615 3561 * Miscellaneous functions
3616 3562 */
3617 3563
3618 3564 /*
3619 3565 * ehci_obtain_state:
3620 3566 *
3621 3567 * NOTE: This function is also called from POLLED MODE.
3622 3568 */
3623 3569 ehci_state_t *
3624 3570 ehci_obtain_state(dev_info_t *dip)
3625 3571 {
3626 3572 int instance = ddi_get_instance(dip);
3627 3573
3628 3574 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3629 3575
3630 3576 ASSERT(state != NULL);
3631 3577
3632 3578 return (state);
3633 3579 }
3634 3580
3635 3581
3636 3582 /*
3637 3583 * ehci_state_is_operational:
3638 3584 *
3639 3585 * Check the Host controller state and return proper values.
3640 3586 */
3641 3587 int
3642 3588 ehci_state_is_operational(ehci_state_t *ehcip)
3643 3589 {
3644 3590 int val;
3645 3591
3646 3592 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3647 3593
3648 3594 switch (ehcip->ehci_hc_soft_state) {
3649 3595 case EHCI_CTLR_INIT_STATE:
3650 3596 case EHCI_CTLR_SUSPEND_STATE:
3651 3597 val = USB_FAILURE;
3652 3598 break;
3653 3599 case EHCI_CTLR_OPERATIONAL_STATE:
3654 3600 val = USB_SUCCESS;
3655 3601 break;
3656 3602 case EHCI_CTLR_ERROR_STATE:
3657 3603 val = USB_HC_HARDWARE_ERROR;
3658 3604 break;
3659 3605 default:
3660 3606 val = USB_FAILURE;
3661 3607 break;
3662 3608 }
3663 3609
3664 3610 return (val);
3665 3611 }
3666 3612
3667 3613
3668 3614 /*
3669 3615 * ehci_do_soft_reset
3670 3616 *
3671 3617 * Do soft reset of ehci host controller.
3672 3618 */
3673 3619 int
3674 3620 ehci_do_soft_reset(ehci_state_t *ehcip)
3675 3621 {
3676 3622 usb_frame_number_t before_frame_number, after_frame_number;
3677 3623 ehci_regs_t *ehci_save_regs;
3678 3624
3679 3625 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3680 3626
3681 3627 /* Increment host controller error count */
3682 3628 ehcip->ehci_hc_error++;
3683 3629
3684 3630 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3685 3631 "ehci_do_soft_reset:"
3686 3632 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3687 3633
3688 3634 /*
3689 3635 * Allocate space for saving current Host Controller
3690 3636 * registers. Don't do any recovery if allocation
3691 3637 * fails.
3692 3638 */
3693 3639 ehci_save_regs = (ehci_regs_t *)
3694 3640 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3695 3641
3696 3642 if (ehci_save_regs == NULL) {
3697 3643 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3698 3644 "ehci_do_soft_reset: kmem_zalloc failed");
3699 3645
3700 3646 return (USB_FAILURE);
3701 3647 }
3702 3648
3703 3649 /* Save current ehci registers */
3704 3650 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3705 3651 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3706 3652 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3707 3653 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3708 3654 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3709 3655 ehci_save_regs->ehci_periodic_list_base =
3710 3656 Get_OpReg(ehci_periodic_list_base);
3711 3657
3712 3658 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3713 3659 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3714 3660
3715 3661 /* Disable all list processing and interrupts */
3716 3662 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3717 3663 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3718 3664
3719 3665 /* Disable all EHCI interrupts */
3720 3666 Set_OpReg(ehci_interrupt, 0);
3721 3667
3722 3668 /* Wait for a few milliseconds */
3723 3669 drv_usecwait(EHCI_SOF_TIMEWAIT);
3724 3670
3725 3671 /* Do light soft reset of ehci host controller */
3726 3672 Set_OpReg(ehci_command,
3727 3673 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3728 3674
3729 3675 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3730 3676 "ehci_do_soft_reset: Reset in progress");
3731 3677
3732 3678 /* Wait for reset to complete */
3733 3679 drv_usecwait(EHCI_RESET_TIMEWAIT);
3734 3680
3735 3681 /*
3736 3682 * Restore the previously saved EHCI register values
3737 3683 * into the current EHCI registers.
3738 3684 */
3739 3685 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3740 3686 ehci_save_regs->ehci_ctrl_segment);
3741 3687
3742 3688 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3743 3689 ehci_save_regs->ehci_periodic_list_base);
3744 3690
3745 3691 Set_OpReg(ehci_async_list_addr, (uint32_t)
3746 3692 ehci_save_regs->ehci_async_list_addr);
3747 3693
3748 3694 /*
3749 3695 * For some reason this register might get nulled out by
3750 3696 * the Uli M1575 South Bridge. To work around the hardware
3751 3697 * problem, check the value after the write and retry if the
3752 3698 * write fails.
3753 3699 */
3754 3700 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3755 3701 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3756 3702 (ehci_save_regs->ehci_async_list_addr !=
3757 3703 Get_OpReg(ehci_async_list_addr))) {
3758 3704 int retry = 0;
3759 3705
3760 3706 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3761 3707 ehci_save_regs->ehci_async_list_addr, retry);
3762 3708 if (retry >= EHCI_MAX_RETRY) {
3763 3709 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3764 3710 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3765 3711 " ASYNCLISTADDR write failed.");
3766 3712
3767 3713 return (USB_FAILURE);
3768 3714 }
3769 3715 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3770 3716 "ehci_do_soft_reset: ASYNCLISTADDR "
3771 3717 "write failed, retry=%d", retry);
3772 3718 }
3773 3719
3774 3720 Set_OpReg(ehci_config_flag, (uint32_t)
3775 3721 ehci_save_regs->ehci_config_flag);
3776 3722
3777 3723 /* Enable both Asynchronous and Periodic Schedule if necessary */
3778 3724 ehci_toggle_scheduler(ehcip);
3779 3725
3780 3726 /*
3781 3727 * Set ehci_interrupt to enable all interrupts except the Root
3782 3728 * Hub Status change interrupt.
3783 3729 */
3784 3730 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3785 3731 EHCI_INTR_FRAME_LIST_ROLLOVER |
3786 3732 EHCI_INTR_USB_ERROR |
3787 3733 EHCI_INTR_USB);
3788 3734
3789 3735 /*
3790 3736 * Deallocate the space that was allocated for saving
3791 3737 * the HC registers.
3792 3738 */
3793 3739 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3794 3740
3795 3741 /*
3796 3742 * Set the desired interrupt threshold, frame list size (if
3797 3743 * applicable) and turn on the EHCI host controller.
3798 3744 */
3799 3745 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3800 3746 ~EHCI_CMD_INTR_THRESHOLD) |
3801 3747 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3802 3748
3803 3749 /* Wait 10ms for EHCI to start sending SOF */
3804 3750 drv_usecwait(EHCI_RESET_TIMEWAIT);
3805 3751
3806 3752 /*
3807 3753 * Get the current usb frame number before waiting for a
3808 3754 * few milliseconds.
3809 3755 */
3810 3756 before_frame_number = ehci_get_current_frame_number(ehcip);
3811 3757
3812 3758 /* Wait for a few milliseconds */
3813 3759 drv_usecwait(EHCI_SOF_TIMEWAIT);
3814 3760
3815 3761 /*
3816 3762 * Get the current usb frame number after waiting for a
3817 3763 * few milliseconds.
3818 3764 */
3819 3765 after_frame_number = ehci_get_current_frame_number(ehcip);
3820 3766
3821 3767 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3822 3768 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3823 3769 "After Frame Number 0x%llx",
3824 3770 (unsigned long long)before_frame_number,
3825 3771 (unsigned long long)after_frame_number);
3826 3772
3827 3773 if ((after_frame_number <= before_frame_number) &&
3828 3774 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3829 3775
3830 3776 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3831 3777 "ehci_do_soft_reset: Soft reset failed");
3832 3778
3833 3779 return (USB_FAILURE);
3834 3780 }
3835 3781
3836 3782 return (USB_SUCCESS);
3837 3783 }
3838 3784
3839 3785
3840 3786 /*
3841 3787 * ehci_get_xfer_attrs:
3842 3788 *
3843 3789 * Get the attributes of a particular xfer.
3844 3790 *
3845 3791 * NOTE: This function is also called from POLLED MODE.
3846 3792 */
3847 3793 usb_req_attrs_t
3848 3794 ehci_get_xfer_attrs(
3849 3795 ehci_state_t *ehcip,
3850 3796 ehci_pipe_private_t *pp,
3851 3797 ehci_trans_wrapper_t *tw)
3852 3798 {
3853 3799 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3854 3800 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3855 3801
3856 3802 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3857 3803 "ehci_get_xfer_attrs:");
3858 3804
3859 3805 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3860 3806 case USB_EP_ATTR_CONTROL:
3861 3807 attrs = ((usb_ctrl_req_t *)
3862 3808 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3863 3809 break;
3864 3810 case USB_EP_ATTR_BULK:
3865 3811 attrs = ((usb_bulk_req_t *)
3866 3812 tw->tw_curr_xfer_reqp)->bulk_attributes;
3867 3813 break;
3868 3814 case USB_EP_ATTR_INTR:
3869 3815 attrs = ((usb_intr_req_t *)
3870 3816 tw->tw_curr_xfer_reqp)->intr_attributes;
3871 3817 break;
3872 3818 }
3873 3819
3874 3820 return (attrs);
3875 3821 }
3876 3822
3877 3823
3878 3824 /*
3879 3825 * ehci_get_current_frame_number:
3880 3826 *
3881 3827 * Get the current software based usb frame number.
3882 3828 */
3883 3829 usb_frame_number_t
3884 3830 ehci_get_current_frame_number(ehci_state_t *ehcip)
3885 3831 {
3886 3832 usb_frame_number_t usb_frame_number;
3887 3833 usb_frame_number_t ehci_fno, micro_frame_number;
3888 3834
3889 3835 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3890 3836
3891 3837 ehci_fno = ehcip->ehci_fno;
3892 3838 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3893 3839
3894 3840 /*
3895 3841 * Calculate current software based usb frame number.
3896 3842 *
3897 3843 * This code accounts for the fact that the frame number is
3898 3844 * updated by the Host Controller before the ehci driver
3899 3845 * gets a FrameListRollover interrupt that will adjust the
3900 3846 * higher part of the frame number.
3901 3847 *
3902 3848 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3903 3849 */
3904 3850 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3905 3851 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3906 3852 ehci_fno) & 0x2000);
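/*
 * For example, suppose ehci_fno is 0x2000 (an assumed value for
 * illustration) and the hardware frame index has just wrapped
 * past 0x3FFF to 0x0003, before the FrameListRollover interrupt
 * has been serviced. Then (0x0003 & 0x1FFF) | 0x2000 = 0x2003
 * and ((0x0003 ^ 0x2000) & 0x2000) = 0x2000, so the result is
 * 0x4003 and the returned count keeps advancing across the
 * 14-bit rollover.
 */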
3907 3853
3908 3854 /*
3909 3855 * A micro frame is equivalent to 125 usec. Eight
3910 3856 * micro frames are equivalent to one millisecond,
3911 3857 * or one usb frame.
3912 3858 */
3913 3859 usb_frame_number = micro_frame_number >>
3914 3860 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3915 3861
3916 3862 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3917 3863 "ehci_get_current_frame_number: "
3918 3864 "Current usb uframe number = 0x%llx "
3919 3865 "Current usb frame number = 0x%llx",
3920 3866 (unsigned long long)micro_frame_number,
3921 3867 (unsigned long long)usb_frame_number);
3922 3868
3923 3869 return (usb_frame_number);
3924 3870 }
3925 3871
3926 3872
3927 3873 /*
3928 3874 * ehci_cpr_cleanup:
3929 3875 *
3930 3876 * Clean up ehci state and other ehci specific information across
3931 3877 * Checkpoint Resume (CPR).
3932 3878 */
3933 3879 static void
3934 3880 ehci_cpr_cleanup(ehci_state_t *ehcip)
3935 3881 {
3936 3882 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3937 3883
3938 3884 /* Reset software part of usb frame number */
3939 3885 ehcip->ehci_fno = 0;
3940 3886 }
3941 3887
3942 3888
3943 3889 /*
3944 3890 * ehci_wait_for_sof:
3945 3891 *
3946 3892 * Wait for a couple of SOF interrupts
3947 3893 */
3948 3894 int
3949 3895 ehci_wait_for_sof(ehci_state_t *ehcip)
3950 3896 {
3951 3897 usb_frame_number_t before_frame_number, after_frame_number;
3952 3898 int error = USB_SUCCESS;
3953 3899
3954 3900 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3955 3901 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3956 3902
3957 3903 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3958 3904
3959 3905 error = ehci_state_is_operational(ehcip);
3960 3906
3961 3907 if (error != USB_SUCCESS) {
3962 3908
3963 3909 return (error);
3964 3910 }
3965 3911
3966 3912 /* Get the current usb frame number before waiting for two SOFs */
3967 3913 before_frame_number = ehci_get_current_frame_number(ehcip);
3968 3914
3969 3915 mutex_exit(&ehcip->ehci_int_mutex);
3970 3916
3971 3917 /* Wait for a few milliseconds */
3972 3918 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3973 3919
3974 3920 mutex_enter(&ehcip->ehci_int_mutex);
3975 3921
3976 3922 /* Get the current usb frame number after waking up */
3977 3923 after_frame_number = ehci_get_current_frame_number(ehcip);
3978 3924
3979 3925 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3980 3926 "ehci_wait_for_sof: framenumber: before 0x%llx "
3981 3927 "after 0x%llx",
3982 3928 (unsigned long long)before_frame_number,
3983 3929 (unsigned long long)after_frame_number);
3984 3930
3985 3931 /* If the usb frame number has not changed, attempt a soft reset */
3986 3932 if (after_frame_number <= before_frame_number) {
3987 3933
3988 3934 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
3989 3935
3990 3936 USB_DPRINTF_L0(PRINT_MASK_LISTS,
3991 3937 ehcip->ehci_log_hdl, "No SOF interrupts");
3992 3938
3993 3939 /* Set host controller soft state to error */
3994 3940 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
3995 3941
3996 3942 return (USB_FAILURE);
3997 3943 }
3998 3944
3999 3945 }
4000 3946
4001 3947 return (USB_SUCCESS);
4002 3948 }
4003 3949
4004 3950 /*
4005 3951 * Toggle the async/periodic schedule based on the open pipe count.
4006 3952 * During pipe cleanup (in the pipe reset case), the pipe's QH is
4007 3953 * temporarily disabled, but the TW on the pipe is not freed. In this
4008 3954 * case, we need to disable the async/periodic schedule for some
4009 3955 * non-compatible hardware. Otherwise, the hardware will overwrite the
4010 3956 * software's configuration of the QH.
4011 3957 */
4012 3958 void
4013 3959 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4014 3960 {
4015 3961 uint_t temp_reg, cmd_reg;
4016 3962
4017 3963 cmd_reg = Get_OpReg(ehci_command);
4018 3964 temp_reg = cmd_reg;
4019 3965
4020 3966 /*
4021 3967 * Enable/Disable asynchronous scheduler, and
4022 3968 * turn on/off async list door bell
4023 3969 */
4024 3970 if (ehcip->ehci_open_async_count) {
4025 3971 if ((ehcip->ehci_async_req_count > 0) &&
4026 3972 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4027 3973 /*
4028 3974 * For some reason this address might get nulled out by
4029 3975 * the ehci chip. Set it here just in case it is null.
4030 3976 */
4031 3977 Set_OpReg(ehci_async_list_addr,
4032 3978 ehci_qh_cpu_to_iommu(ehcip,
4033 3979 ehcip->ehci_head_of_async_sched_list));
4034 3980
4035 3981 /*
4036 3982 * For some reason this register might get nulled out by
4037 3983 * the Uli M1575 Southbridge. To work around the HW
4038 3984 * problem, check the value after the write and retry if the
4039 3985 * write fails.
4040 3986 *
4041 3987 * If the ASYNCLISTADDR remains "stuck" after
4042 3988 * EHCI_MAX_RETRY retries, then the M1575 is broken
4043 3989 * and is stuck in an inconsistent state and is about
4044 3990 * to crash the machine with a trn_oor panic when it
4045 3991 * does a DMA read from 0x0. It is better to panic
4046 3992 * now rather than wait for the trn_oor crash; this
4047 3993 * way Customer Service will have a clean signature
4048 3994 * that indicts the M1575 chip rather than a
4049 3995 * mysterious and hard-to-diagnose trn_oor panic.
4050 3996 */
4051 3997 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4052 3998 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4053 3999 (ehci_qh_cpu_to_iommu(ehcip,
4054 4000 ehcip->ehci_head_of_async_sched_list) !=
4055 4001 Get_OpReg(ehci_async_list_addr))) {
4056 4002 int retry = 0;
4057 4003
4058 4004 Set_OpRegRetry(ehci_async_list_addr,
4059 4005 ehci_qh_cpu_to_iommu(ehcip,
4060 4006 ehcip->ehci_head_of_async_sched_list),
4061 4007 retry);
4062 4008 if (retry >= EHCI_MAX_RETRY)
4063 4009 cmn_err(CE_PANIC,
4064 4010 "ehci_toggle_scheduler_on_pipe: "
4065 4011 "ASYNCLISTADDR write failed.");
4066 4012
4067 4013 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4068 4014 ehcip->ehci_log_hdl,
4069 4015 "ehci_toggle_scheduler_on_pipe:"
4070 4016 " ASYNCLISTADDR write failed, retry=%d",
4071 4017 retry);
4072 4018 }
4073 4019
4074 4020 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4075 4021 }
4076 4022 } else {
4077 4023 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4078 4024 }
4079 4025
4080 4026 if (ehcip->ehci_open_periodic_count) {
4081 4027 if ((ehcip->ehci_periodic_req_count > 0) &&
4082 4028 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4083 4029 /*
4084 4030 * For some reason this address gets nulled out by
4085 4031 * the ehci chip. Set it here just in case it is null.
4086 4032 */
4087 4033 Set_OpReg(ehci_periodic_list_base,
4088 4034 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4089 4035 0xFFFFF000));
4090 4036 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4091 4037 }
4092 4038 } else {
4093 4039 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4094 4040 }
4095 4041
4096 4042 /* Just an optimization */
4097 4043 if (temp_reg != cmd_reg) {
4098 4044 Set_OpReg(ehci_command, cmd_reg);
4099 4045 }
4100 4046 }
4101 4047
4102 4048
4103 4049 /*
4104 4050 * ehci_toggle_scheduler:
4105 4051 *
4106 4052 * Turn the scheduler on or off based on the pipe open count.
4107 4053 */
4108 4054 void
4109 4055 ehci_toggle_scheduler(ehci_state_t *ehcip)
4110 4056 {
4111 4057 uint_t temp_reg, cmd_reg;
4112 4058
4113 4059 /*
4114 4060 * As a performance optimization, we only need to change the bits
4115 4061 * when the async or periodic req count becomes 1 or 0.
4116 4062 *
4117 4063 * The related bits are already enabled if
4118 4064 *	the async and periodic req counts are both > 1,
4119 4065 *	OR the async req count is > 1 and there is no periodic pipe,
4120 4066 *	OR the periodic req count is > 1 and there is no async pipe.
4121 4067 */
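/*
 * For example, when a second async request is queued while no
 * periodic pipes are open, the async req count goes from 1 to 2;
 * the async enable bit was already set for the first request, so
 * there is nothing to change and we return early.
 */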
4122 4068 if (((ehcip->ehci_async_req_count > 1) &&
4123 4069 (ehcip->ehci_periodic_req_count > 1)) ||
4124 4070 ((ehcip->ehci_async_req_count > 1) &&
4125 4071 (ehcip->ehci_open_periodic_count == 0)) ||
4126 4072 ((ehcip->ehci_periodic_req_count > 1) &&
4127 4073 (ehcip->ehci_open_async_count == 0))) {
4128 4074 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4129 4075 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4130 4076 "async/periodic bits no need to change");
4131 4077
4132 4078 return;
4133 4079 }
4134 4080
4135 4081 cmd_reg = Get_OpReg(ehci_command);
4136 4082 temp_reg = cmd_reg;
4137 4083
4138 4084 /*
4139 4085 * Enable/Disable asynchronous scheduler, and
4140 4086 * turn on/off async list door bell
4141 4087 */
4142 4088 if (ehcip->ehci_async_req_count > 1) {
4143 4089 /* the async bit is already enabled */
4144 4090 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4145 4091 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4146 4092 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4147 4093 } else if (ehcip->ehci_async_req_count == 1) {
4148 4094 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4149 4095 /*
4150 4096 * For some reason this address might get nulled out by
4151 4097 * the ehci chip. Set it here just in case it is null.
4152 4098 * If it's not null, we should not reset the
4153 4099 * ASYNCLISTADDR, because it's updated by hardware to
4154 4100 * point to the next queue head to be executed.
4155 4101 */
4156 4102 if (!Get_OpReg(ehci_async_list_addr)) {
4157 4103 Set_OpReg(ehci_async_list_addr,
4158 4104 ehci_qh_cpu_to_iommu(ehcip,
4159 4105 ehcip->ehci_head_of_async_sched_list));
4160 4106 }
4161 4107
4162 4108 /*
4163 4109 * For some reason this register might get nulled out by
4164 4110 * the Uli M1575 Southbridge. To work around the HW
4165 4111 * problem, check the value after the write and retry if the
4166 4112 * write fails.
4167 4113 *
4168 4114 * If the ASYNCLISTADDR remains "stuck" after
4169 4115 * EHCI_MAX_RETRY retries, then the M1575 is broken
4170 4116 * and is stuck in an inconsistent state and is about
4171 4117 * to crash the machine with a trn_oor panic when it
4172 4118 * does a DMA read from 0x0. It is better to panic
4173 4119 * now rather than wait for the trn_oor crash; this
4174 4120 * way Customer Service will have a clean signature
4175 4121 * that indicts the M1575 chip rather than a
4176 4122 * mysterious and hard-to-diagnose trn_oor panic.
4177 4123 */
4178 4124 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4179 4125 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4180 4126 (ehci_qh_cpu_to_iommu(ehcip,
4181 4127 ehcip->ehci_head_of_async_sched_list) !=
4182 4128 Get_OpReg(ehci_async_list_addr))) {
4183 4129 int retry = 0;
4184 4130
4185 4131 Set_OpRegRetry(ehci_async_list_addr,
4186 4132 ehci_qh_cpu_to_iommu(ehcip,
4187 4133 ehcip->ehci_head_of_async_sched_list),
4188 4134 retry);
4189 4135 if (retry >= EHCI_MAX_RETRY)
4190 4136 cmn_err(CE_PANIC,
4191 4137 "ehci_toggle_scheduler: "
4192 4138 "ASYNCLISTADDR write failed.");
4193 4139
4194 4140 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4195 4141 ehcip->ehci_log_hdl,
4196 4142 "ehci_toggle_scheduler: ASYNCLISTADDR "
4197 4143 "write failed, retry=%d", retry);
4198 4144 }
4199 4145 }
4200 4146 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4201 4147 } else {
4202 4148 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4203 4149 }
4204 4150
4205 4151 if (ehcip->ehci_periodic_req_count > 1) {
4206 4152 /* the periodic bit is already enabled. */
4207 4153 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4208 4154 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4209 4155 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4210 4156 } else if (ehcip->ehci_periodic_req_count == 1) {
4211 4157 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4212 4158 /*
4213 4159 * For some reason this address gets nulled out by
4214 4160 * the ehci chip. Set it here just in case it is null.
4215 4161 */
4216 4162 Set_OpReg(ehci_periodic_list_base,
4217 4163 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4218 4164 0xFFFFF000));
4219 4165 }
4220 4166 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4221 4167 } else {
4222 4168 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4223 4169 }
4224 4170
4225 4171 /* Just an optimization */
4226 4172 if (temp_reg != cmd_reg) {
4227 4173 Set_OpReg(ehci_command, cmd_reg);
4228 4174
4229 4175 /* To make sure the command register is updated correctly */
4230 4176 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4231 4177 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4232 4178 int retry = 0;
4233 4179
4234 4180 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4235 4181 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4236 4182 ehcip->ehci_log_hdl,
4237 4183 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4238 4184 retry);
4239 4185 }
4240 4186
4241 4187 }
4242 4188 }
4243 4189
4244 4190 /*
4245 4191 * ehci print functions
4246 4192 */
4247 4193
4248 4194 /*
4249 4195 * ehci_print_caps:
4250 4196 */
4251 4197 void
4252 4198 ehci_print_caps(ehci_state_t *ehcip)
4253 4199 {
4254 4200 uint_t i;
4255 4201
4256 4202 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4257 4203 "\n\tUSB 2.0 Host Controller Characteristics\n");
4258 4204
4259 4205 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4260 4206 "Caps Length: 0x%x Version: 0x%x\n",
4261 4207 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4262 4208
4263 4209 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4264 4210 "Structural Parameters\n");
4265 4211 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4266 4212 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4267 4213 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4268 4214 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4269 4215 "No of Classic host controllers: 0x%x",
4270 4216 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4271 4217 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4272 4218 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4273 4219 "No of ports per Classic host controller: 0x%x",
4274 4220 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4275 4221 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4276 4222 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4277 4223 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4278 4224 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4279 4225 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4280 4226 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4281 4227 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4282 4228 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4283 4229 "No of root hub ports: 0x%x\n",
4284 4230 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4285 4231
4286 4232 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4287 4233 "Capability Parameters\n");
4288 4234 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4289 4235 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4290 4236 EHCI_HCC_EECP) ? "Yes" : "No");
4291 4237 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4292 4238 "Isoch schedule threshold: 0x%x",
4293 4239 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4294 4240 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4295 4241 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4296 4242 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4297 4243 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4298 4244 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4299 4245 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4300 4246 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4301 4247 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4302 4248 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4303 4249
4304 4250 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4305 4251 "Classic Port Route Description");
4306 4252
4307 4253 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4308 4254 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4309 4255 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4310 4256 }
4311 4257 }
4312 4258
4313 4259
4314 4260 /*
4315 4261 * ehci_print_regs:
4316 4262 */
4317 4263 void
4318 4264 ehci_print_regs(ehci_state_t *ehcip)
4319 4265 {
4320 4266 uint_t i;
4321 4267
4322 4268 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4323 4269 "\n\tEHCI%d Operational Registers\n",
4324 4270 ddi_get_instance(ehcip->ehci_dip));
4325 4271
4326 4272 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4327 4273 "Command: 0x%x Status: 0x%x",
4328 4274 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4329 4275 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4330 4276 "Interrupt: 0x%x Frame Index: 0x%x",
4331 4277 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4332 4278 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4333 4279 "Control Segment: 0x%x Periodic List Base: 0x%x",
4334 4280 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4335 4281 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4336 4282 "Async List Addr: 0x%x Config Flag: 0x%x",
4337 4283 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4338 4284
4339 4285 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4340 4286 "Root Hub Port Status");
4341 4287
4342 4288 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4343 4289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4344 4290 "\tPort Status 0x%x: 0x%x ", i,
4345 4291 Get_OpReg(ehci_rh_port_status[i]));
4346 4292 }
4347 4293 }
4348 4294
4349 4295
4350 4296 /*
4351 4297 * ehci_print_qh:
4352 4298 */
4353 4299 void
4354 4300 ehci_print_qh(
4355 4301 ehci_state_t *ehcip,
4356 4302 ehci_qh_t *qh)
4357 4303 {
4358 4304 uint_t i;
4359 4305
4360 4306 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4361 4307 "ehci_print_qh: qh = 0x%p", (void *)qh);
4362 4308
4363 4309 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4364 4310 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4365 4311 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4366 4312 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4367 4313 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4368 4314 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4369 4315 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4370 4316 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4371 4317 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4372 4318 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4373 4319 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4374 4320 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4375 4321 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4376 4322 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4377 4323
4378 4324 for (i = 0; i < 5; i++) {
4379 4325 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4380 4326 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4381 4327 }
4382 4328
4383 4329 for (i = 0; i < 5; i++) {
4384 4330 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4385 4331 "\tqh_buf_high[%d]: 0x%x ",
4386 4332 i, Get_QH(qh->qh_buf_high[i]));
4387 4333 }
4388 4334
4389 4335 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4390 4336 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4391 4337 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4392 4338 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4393 4339 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4394 4340 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4395 4341 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4396 4342 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4397 4343 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4398 4344 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4399 4345 }
4400 4346
4401 4347
4402 4348 /*
4403 4349 * ehci_print_qtd:
4404 4350 */
4405 4351 void
4406 4352 ehci_print_qtd(
4407 4353 ehci_state_t *ehcip,
4408 4354 ehci_qtd_t *qtd)
4409 4355 {
4410 4356 uint_t i;
4411 4357
4412 4358 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4413 4359 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4414 4360
4415 4361 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4416 4362 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4417 4363 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4418 4364 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4419 4365 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4420 4366 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4421 4367
4422 4368 for (i = 0; i < 5; i++) {
4423 4369 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4424 4370 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4425 4371 }
4426 4372
4427 4373 for (i = 0; i < 5; i++) {
4428 4374 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4429 4375 "\tqtd_buf_high[%d]: 0x%x ",
4430 4376 i, Get_QTD(qtd->qtd_buf_high[i]));
4431 4377 }
4432 4378
4433 4379 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4434 4380 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4435 4381 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4436 4382 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4437 4383 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4438 4384 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4439 4385 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4440 4386 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4441 4387 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4442 4388 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4443 4389 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4444 4390 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4445 4391 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4446 4392 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4447 4393 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4448 4394 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4449 4395 }
4450 4396
4451 4397 /*
4452 4398 * ehci kstat functions
4453 4399 */
4454 4400
4455 4401 /*
4456 4402 * ehci_create_stats:
4457 4403 *
4458 4404 * Allocate and initialize the ehci kstat structures
4459 4405 */
4460 4406 void
4461 4407 ehci_create_stats(ehci_state_t *ehcip)
4462 4408 {
4463 4409 char kstatname[KSTAT_STRLEN];
4464 4410 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4465 4411 char *usbtypes[USB_N_COUNT_KSTATS] =
4466 4412 {"ctrl", "isoch", "bulk", "intr"};
4467 4413 uint_t instance = ehcip->ehci_instance;
4468 4414 ehci_intrs_stats_t *isp;
4469 4415 int i;
4470 4416
4471 4417 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4472 4418 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4473 4419 dname, instance);
4474 4420 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4475 4421 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4476 4422 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4477 4423 KSTAT_FLAG_PERSISTENT);
4478 4424
4479 4425 if (EHCI_INTRS_STATS(ehcip)) {
4480 4426 isp = EHCI_INTRS_STATS_DATA(ehcip);
4481 4427 kstat_named_init(&isp->ehci_sts_total,
4482 4428 "Interrupts Total", KSTAT_DATA_UINT64);
4483 4429 kstat_named_init(&isp->ehci_sts_not_claimed,
4484 4430 "Not Claimed", KSTAT_DATA_UINT64);
4485 4431 kstat_named_init(&isp->ehci_sts_async_sched_status,
4486 4432 "Async schedule status", KSTAT_DATA_UINT64);
4487 4433 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4488 4434 "Periodic sched status", KSTAT_DATA_UINT64);
4489 4435 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4490 4436 "Empty async schedule", KSTAT_DATA_UINT64);
4491 4437 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4492 4438 "Host controller Halted", KSTAT_DATA_UINT64);
4493 4439 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4494 4440 "Intr on async advance", KSTAT_DATA_UINT64);
4495 4441 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4496 4442 "Host system error", KSTAT_DATA_UINT64);
4497 4443 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4498 4444 "Frame list rollover", KSTAT_DATA_UINT64);
4499 4445 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4500 4446 "Port change detect", KSTAT_DATA_UINT64);
4501 4447 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4502 4448 "USB error interrupt", KSTAT_DATA_UINT64);
4503 4449 kstat_named_init(&isp->ehci_sts_usb_intr,
4504 4450 "USB interrupt", KSTAT_DATA_UINT64);
4505 4451
4506 4452 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4507 4453 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4508 4454 kstat_install(EHCI_INTRS_STATS(ehcip));
4509 4455 }
4510 4456 }
4511 4457
4512 4458 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4513 4459 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4514 4460 dname, instance);
4515 4461 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4516 4462 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4517 4463 KSTAT_FLAG_PERSISTENT);
4518 4464
4519 4465 if (EHCI_TOTAL_STATS(ehcip)) {
4520 4466 kstat_install(EHCI_TOTAL_STATS(ehcip));
4521 4467 }
4522 4468 }
4523 4469
4524 4470 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4525 4471 if (ehcip->ehci_count_stats[i] == NULL) {
4526 4472 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4527 4473 dname, instance, usbtypes[i]);
4528 4474 ehcip->ehci_count_stats[i] = kstat_create("usba",
4529 4475 instance, kstatname, "usb_byte_count",
4530 4476 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4531 4477
4532 4478 if (ehcip->ehci_count_stats[i]) {
4533 4479 kstat_install(ehcip->ehci_count_stats[i]);
4534 4480 }
4535 4481 }
4536 4482 }
4537 4483 }
4538 4484
4539 4485
4540 4486 /*
4541 4487 * ehci_destroy_stats:
4542 4488 *
4543 4489 * Clean up ehci kstat structures
4544 4490 */
4545 4491 void
4546 4492 ehci_destroy_stats(ehci_state_t *ehcip)
4547 4493 {
4548 4494 int i;
4549 4495
4550 4496 if (EHCI_INTRS_STATS(ehcip)) {
4551 4497 kstat_delete(EHCI_INTRS_STATS(ehcip));
4552 4498 EHCI_INTRS_STATS(ehcip) = NULL;
4553 4499 }
4554 4500
4555 4501 if (EHCI_TOTAL_STATS(ehcip)) {
4556 4502 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4557 4503 EHCI_TOTAL_STATS(ehcip) = NULL;
4558 4504 }
4559 4505
4560 4506 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4561 4507 if (ehcip->ehci_count_stats[i]) {
4562 4508 kstat_delete(ehcip->ehci_count_stats[i]);
4563 4509 ehcip->ehci_count_stats[i] = NULL;
4564 4510 }
4565 4511 }
4566 4512 }
4567 4513
4568 4514
4569 4515 /*
4570 4516 * ehci_do_intrs_stats:
4571 4517 *
4572 4518 * ehci status information
4573 4519 */
4574 4520 void
4575 4521 ehci_do_intrs_stats(
4576 4522 ehci_state_t *ehcip,
4577 4523 int val)
4578 4524 {
4579 4525 if (EHCI_INTRS_STATS(ehcip)) {
4580 4526 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4581 4527 switch (val) {
4582 4528 case EHCI_STS_ASYNC_SCHED_STATUS:
4583 4529 EHCI_INTRS_STATS_DATA(ehcip)->
4584 4530 ehci_sts_async_sched_status.value.ui64++;
4585 4531 break;
4586 4532 case EHCI_STS_PERIODIC_SCHED_STATUS:
4587 4533 EHCI_INTRS_STATS_DATA(ehcip)->
4588 4534 ehci_sts_periodic_sched_status.value.ui64++;
4589 4535 break;
4590 4536 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4591 4537 EHCI_INTRS_STATS_DATA(ehcip)->
4592 4538 ehci_sts_empty_async_schedule.value.ui64++;
4593 4539 break;
4594 4540 case EHCI_STS_HOST_CTRL_HALTED:
4595 4541 EHCI_INTRS_STATS_DATA(ehcip)->
4596 4542 ehci_sts_host_ctrl_halted.value.ui64++;
4597 4543 break;
4598 4544 case EHCI_STS_ASYNC_ADVANCE_INTR:
4599 4545 EHCI_INTRS_STATS_DATA(ehcip)->
4600 4546 ehci_sts_async_advance_intr.value.ui64++;
4601 4547 break;
4602 4548 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4603 4549 EHCI_INTRS_STATS_DATA(ehcip)->
4604 4550 ehci_sts_host_system_error_intr.value.ui64++;
4605 4551 break;
4606 4552 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4607 4553 EHCI_INTRS_STATS_DATA(ehcip)->
4608 4554 ehci_sts_frm_list_rollover_intr.value.ui64++;
4609 4555 break;
4610 4556 case EHCI_STS_RH_PORT_CHANGE_INTR:
4611 4557 EHCI_INTRS_STATS_DATA(ehcip)->
4612 4558 ehci_sts_rh_port_change_intr.value.ui64++;
4613 4559 break;
4614 4560 case EHCI_STS_USB_ERROR_INTR:
4615 4561 EHCI_INTRS_STATS_DATA(ehcip)->
4616 4562 ehci_sts_usb_error_intr.value.ui64++;
4617 4563 break;
4618 4564 case EHCI_STS_USB_INTR:
4619 4565 EHCI_INTRS_STATS_DATA(ehcip)->
4620 4566 ehci_sts_usb_intr.value.ui64++;
4621 4567 break;
4622 4568 default:
4623 4569 EHCI_INTRS_STATS_DATA(ehcip)->
4624 4570 ehci_sts_not_claimed.value.ui64++;
4625 4571 break;
4626 4572 }
4627 4573 }
4628 4574 }
4629 4575
4630 4576
4631 4577 /*
4632 4578 * ehci_do_byte_stats:
4633 4579 *
4634 4580 * ehci data xfer information
4635 4581 */
4636 4582 void
4637 4583 ehci_do_byte_stats(
4638 4584 ehci_state_t *ehcip,
4639 4585 size_t len,
4640 4586 uint8_t attr,
4641 4587 uint8_t addr)
4642 4588 {
4643 4589 uint8_t type = attr & USB_EP_ATTR_MASK;
4644 4590 uint8_t dir = addr & USB_EP_DIR_MASK;
4645 4591
4646 4592 if (dir == USB_EP_DIR_IN) {
4647 4593 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4648 4594 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4649 4595 switch (type) {
4650 4596 case USB_EP_ATTR_CONTROL:
4651 4597 EHCI_CTRL_STATS(ehcip)->reads++;
4652 4598 EHCI_CTRL_STATS(ehcip)->nread += len;
4653 4599 break;
4654 4600 case USB_EP_ATTR_BULK:
4655 4601 EHCI_BULK_STATS(ehcip)->reads++;
4656 4602 EHCI_BULK_STATS(ehcip)->nread += len;
4657 4603 break;
4658 4604 case USB_EP_ATTR_INTR:
4659 4605 EHCI_INTR_STATS(ehcip)->reads++;
4660 4606 EHCI_INTR_STATS(ehcip)->nread += len;
4661 4607 break;
4662 4608 case USB_EP_ATTR_ISOCH:
4663 4609 EHCI_ISOC_STATS(ehcip)->reads++;
4664 4610 EHCI_ISOC_STATS(ehcip)->nread += len;
4665 4611 break;
4666 4612 }
4667 4613 } else if (dir == USB_EP_DIR_OUT) {
4668 4614 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4669 4615 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4670 4616 switch (type) {
4671 4617 case USB_EP_ATTR_CONTROL:
4672 4618 EHCI_CTRL_STATS(ehcip)->writes++;
4673 4619 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4674 4620 break;
4675 4621 case USB_EP_ATTR_BULK:
4676 4622 EHCI_BULK_STATS(ehcip)->writes++;
4677 4623 EHCI_BULK_STATS(ehcip)->nwritten += len;
4678 4624 break;
4679 4625 case USB_EP_ATTR_INTR:
4680 4626 EHCI_INTR_STATS(ehcip)->writes++;
4681 4627 EHCI_INTR_STATS(ehcip)->nwritten += len;
4682 4628 break;
4683 4629 case USB_EP_ATTR_ISOCH:
4684 4630 EHCI_ISOC_STATS(ehcip)->writes++;
4685 4631 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4686 4632 break;
4687 4633 }
4688 4634 }
4689 4635 }
|
↓ open down ↓ |
1700 lines elided |
↑ open up ↑ |