1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2018, Joyent, Inc.
25 */
26
27 /*
28 * Copyright 2018 Nexenta Systems, Inc.
29 */
30
31 /*
32 * EHCI Host Controller Driver (EHCI)
33 *
34 * The EHCI driver is a software driver which interfaces to the Universal
35 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
36 * the Host Controller is defined by the EHCI Host Controller Interface.
37 *
38 * This module contains the main EHCI driver code which handles all USB
39 * transfers, bandwidth allocations and other general functionalities.
40 */
41
42 #include <sys/usb/hcd/ehci/ehcid.h>
43 #include <sys/usb/hcd/ehci/ehci_isoch.h>
44 #include <sys/usb/hcd/ehci/ehci_xfer.h>
45
46 /*
47 * EHCI MSI tunable:
48 *
49 * By default MSI is enabled on all supported platforms except for the
 * EHCI controller of the ULI1575 southbridge.
51 */
52 boolean_t ehci_enable_msi = B_TRUE;
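
/*
 * Like other module globals, this tunable can be changed without
 * rebuilding the driver, e.g. via an /etc/system entry (illustrative):
 *
 *	set ehci:ehci_enable_msi = 0
 */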
53
54 /* Pointer to the state structure */
55 extern void *ehci_statep;
56
57 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
58
59 extern uint_t ehci_vt62x2_workaround;
60 extern int force_ehci_off;
61
62 /* Adjustable variables for the size of the pools */
63 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
64 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
65
/*
 * Table giving the order in which the 32ms interrupt QH lists are
 * executed by the host controller in the lattice tree.
 */
70 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
71 {0x00, 0x10, 0x08, 0x18,
72 0x04, 0x14, 0x0c, 0x1c,
73 0x02, 0x12, 0x0a, 0x1a,
74 0x06, 0x16, 0x0e, 0x1e,
75 0x01, 0x11, 0x09, 0x19,
76 0x05, 0x15, 0x0d, 0x1d,
77 0x03, 0x13, 0x0b, 0x1b,
78 0x07, 0x17, 0x0f, 0x1f};
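
/*
 * Note: each entry above is the 5-bit bit-reversal of its array index
 * (for example, index 1 (00001b) maps to 0x10 (10000b) and index 6
 * (00110b) maps to 0x0c (01100b)), which spreads the 32ms lists evenly
 * across consecutive frames.
 */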
79
/*
 * Values used to calculate the start split mask for the low/full/high
 * speed interrupt and isochronous endpoints.
 */
84 static uint_t ehci_start_split_mask[15] = {
	/*
	 * For high/full/low speed usb devices, and for high speed
	 * devices with a polling interval greater than or equal to
	 * 8 microframes (1ms; one microframe is 125us).
	 */
90 0x01, /* 00000001 */
91 0x02, /* 00000010 */
92 0x04, /* 00000100 */
93 0x08, /* 00001000 */
94 0x10, /* 00010000 */
95 0x20, /* 00100000 */
96 0x40, /* 01000000 */
97 0x80, /* 10000000 */
98
	/* Only for high speed devices with a 4 microframe polling interval */
100 0x11, /* 00010001 */
101 0x22, /* 00100010 */
102 0x44, /* 01000100 */
103 0x88, /* 10001000 */
104
	/* Only for high speed devices with a 2 microframe polling interval */
106 0x55, /* 01010101 */
107 0xaa, /* 10101010 */
108
	/* Only for high speed devices with a 1 microframe polling interval */
110 0xff /* 11111111 */
111 };
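
/*
 * For reference, bit N of a start split mask corresponds to micro-frame
 * N within a frame.  For example, the mask 0x11 (00010001) schedules
 * start splits (or high speed transactions) in micro-frames 0 and 4,
 * i.e. once every 4 micro-frames.
 */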
112
/*
 * Values used to calculate the complete split mask for the low/full
 * speed interrupt and isochronous endpoints.
 */
117 static uint_t ehci_intr_complete_split_mask[7] = {
118 /* Only full/low speed devices */
119 0x1c, /* 00011100 */
120 0x38, /* 00111000 */
121 0x70, /* 01110000 */
122 0xe0, /* 11100000 */
123 0x00, /* Need FSTN feature */
124 0x00, /* Need FSTN feature */
125 0x00 /* Need FSTN feature */
126 };
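
/*
 * For reference, a complete split mask is paired with a start split
 * mask such that the complete splits follow the start split by two to
 * four micro-frames.  For example, a start split in micro-frame 0
 * (mask 0x01) pairs with the complete split mask 0x1c, covering
 * micro-frames 2, 3 and 4.  Start splits late in the frame would need
 * complete splits in the next frame, which requires the FSTN feature.
 */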
127
128
129 /*
130 * EHCI Internal Function Prototypes
131 */
132
133 /* Host Controller Driver (HCD) initialization functions */
134 void ehci_set_dma_attributes(ehci_state_t *ehcip);
135 int ehci_allocate_pools(ehci_state_t *ehcip);
136 void ehci_decode_ddi_dma_addr_bind_handle_result(
137 ehci_state_t *ehcip,
138 int result);
139 int ehci_map_regs(ehci_state_t *ehcip);
140 int ehci_register_intrs_and_init_mutex(
141 ehci_state_t *ehcip);
142 static int ehci_add_intrs(ehci_state_t *ehcip,
143 int intr_type);
144 int ehci_init_ctlr(ehci_state_t *ehcip,
145 int init_type);
146 static int ehci_take_control(ehci_state_t *ehcip);
147 static int ehci_init_periodic_frame_lst_table(
148 ehci_state_t *ehcip);
149 static void ehci_build_interrupt_lattice(
150 ehci_state_t *ehcip);
151 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
152
153 /* Host Controller Driver (HCD) deinitialization functions */
154 int ehci_cleanup(ehci_state_t *ehcip);
155 static void ehci_rem_intrs(ehci_state_t *ehcip);
156 int ehci_cpr_suspend(ehci_state_t *ehcip);
157 int ehci_cpr_resume(ehci_state_t *ehcip);
158
159 /* Bandwidth Allocation functions */
160 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
161 usba_pipe_handle_data_t *ph,
162 uint_t *pnode,
163 uchar_t *smask,
164 uchar_t *cmask);
165 static int ehci_allocate_high_speed_bandwidth(
166 ehci_state_t *ehcip,
167 usba_pipe_handle_data_t *ph,
168 uint_t *hnode,
169 uchar_t *smask,
170 uchar_t *cmask);
171 static int ehci_allocate_classic_tt_bandwidth(
172 ehci_state_t *ehcip,
173 usba_pipe_handle_data_t *ph,
174 uint_t pnode);
175 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
176 usba_pipe_handle_data_t *ph,
177 uint_t pnode,
178 uchar_t smask,
179 uchar_t cmask);
180 static void ehci_deallocate_high_speed_bandwidth(
181 ehci_state_t *ehcip,
182 usba_pipe_handle_data_t *ph,
183 uint_t hnode,
184 uchar_t smask,
185 uchar_t cmask);
186 static void ehci_deallocate_classic_tt_bandwidth(
187 ehci_state_t *ehcip,
188 usba_pipe_handle_data_t *ph,
189 uint_t pnode);
190 static int ehci_compute_high_speed_bandwidth(
191 ehci_state_t *ehcip,
192 usb_ep_descr_t *endpoint,
193 usb_port_status_t port_status,
194 uint_t *sbandwidth,
195 uint_t *cbandwidth);
196 static int ehci_compute_classic_bandwidth(
197 usb_ep_descr_t *endpoint,
198 usb_port_status_t port_status,
199 uint_t *bandwidth);
200 int ehci_adjust_polling_interval(
201 ehci_state_t *ehcip,
202 usb_ep_descr_t *endpoint,
203 usb_port_status_t port_status);
204 static int ehci_adjust_high_speed_polling_interval(
205 ehci_state_t *ehcip,
206 usb_ep_descr_t *endpoint);
207 static uint_t ehci_lattice_height(uint_t interval);
208 static uint_t ehci_lattice_parent(uint_t node);
209 static uint_t ehci_find_periodic_node(
210 uint_t leaf,
211 int interval);
212 static uint_t ehci_leftmost_leaf(uint_t node,
213 uint_t height);
214 static uint_t ehci_pow_2(uint_t x);
215 static uint_t ehci_log_2(uint_t x);
216 static int ehci_find_bestfit_hs_mask(
217 ehci_state_t *ehcip,
218 uchar_t *smask,
219 uint_t *pnode,
220 usb_ep_descr_t *endpoint,
221 uint_t bandwidth,
222 int interval);
223 static int ehci_find_bestfit_ls_intr_mask(
224 ehci_state_t *ehcip,
225 uchar_t *smask,
226 uchar_t *cmask,
227 uint_t *pnode,
228 uint_t sbandwidth,
229 uint_t cbandwidth,
230 int interval);
231 static int ehci_find_bestfit_sitd_in_mask(
232 ehci_state_t *ehcip,
233 uchar_t *smask,
234 uchar_t *cmask,
235 uint_t *pnode,
236 uint_t sbandwidth,
237 uint_t cbandwidth,
238 int interval);
239 static int ehci_find_bestfit_sitd_out_mask(
240 ehci_state_t *ehcip,
241 uchar_t *smask,
242 uint_t *pnode,
243 uint_t sbandwidth,
244 int interval);
245 static uint_t ehci_calculate_bw_availability_mask(
246 ehci_state_t *ehcip,
247 uint_t bandwidth,
248 int leaf,
249 int leaf_count,
250 uchar_t *bw_mask);
251 static void ehci_update_bw_availability(
252 ehci_state_t *ehcip,
253 int bandwidth,
254 int leftmost_leaf,
255 int leaf_count,
256 uchar_t mask);
257
258 /* Miscellaneous functions */
259 ehci_state_t *ehci_obtain_state(
260 dev_info_t *dip);
261 int ehci_state_is_operational(
262 ehci_state_t *ehcip);
263 int ehci_do_soft_reset(
264 ehci_state_t *ehcip);
265 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
266 ehci_pipe_private_t *pp,
267 ehci_trans_wrapper_t *tw);
268 usb_frame_number_t ehci_get_current_frame_number(
269 ehci_state_t *ehcip);
270 static void ehci_cpr_cleanup(
271 ehci_state_t *ehcip);
272 int ehci_wait_for_sof(
273 ehci_state_t *ehcip);
274 void ehci_toggle_scheduler(
275 ehci_state_t *ehcip);
276 void ehci_print_caps(ehci_state_t *ehcip);
277 void ehci_print_regs(ehci_state_t *ehcip);
278 void ehci_print_qh(ehci_state_t *ehcip,
279 ehci_qh_t *qh);
280 void ehci_print_qtd(ehci_state_t *ehcip,
281 ehci_qtd_t *qtd);
282 void ehci_create_stats(ehci_state_t *ehcip);
283 void ehci_destroy_stats(ehci_state_t *ehcip);
284 void ehci_do_intrs_stats(ehci_state_t *ehcip,
285 int val);
286 void ehci_do_byte_stats(ehci_state_t *ehcip,
287 size_t len,
288 uint8_t attr,
289 uint8_t addr);
290
/*
 * Check whether this EHCI controller supports power management (PM).
 */
294 int
295 ehci_hcdi_pm_support(dev_info_t *dip)
296 {
297 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
298 ddi_get_instance(dip));
299
300 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
301 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
302
303 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
304 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
305
306 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
307
308 return (USB_SUCCESS);
309 }
310
311 return (USB_FAILURE);
312 }
313
314 void
315 ehci_dma_attr_workaround(ehci_state_t *ehcip)
316 {
	/*
	 * Some NVIDIA chips cannot handle a QH dma address above 2GB.
	 * Bit 31 of the dma address might be dropped, which can cause a
	 * system crash or other unpredictable results.  Force the dma
	 * address to be allocated below 2GB to make ehci work.
	 */
323 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
324 switch (ehcip->ehci_device_id) {
325 case PCI_DEVICE_NVIDIA_CK804:
326 case PCI_DEVICE_NVIDIA_MCP04:
327 USB_DPRINTF_L2(PRINT_MASK_ATTA,
328 ehcip->ehci_log_hdl,
329 "ehci_dma_attr_workaround: NVIDIA dma "
330 "workaround enabled, force dma address "
331 "to be allocated below 2G");
332 ehcip->ehci_dma_attr.dma_attr_addr_hi =
333 0x7fffffffull;
334 break;
335 default:
336 break;
337
338 }
339 }
340 }
341
342 /*
343 * Host Controller Driver (HCD) initialization functions
344 */
345
346 /*
347 * ehci_set_dma_attributes:
348 *
349 * Set the limits in the DMA attributes structure. Most of the values used
 * in the DMA attributes structure are the defaults specified in the
 * Writing PCI Device Drivers document.
352 */
353 void
354 ehci_set_dma_attributes(ehci_state_t *ehcip)
355 {
356 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
357 "ehci_set_dma_attributes:");
358
359 /* Initialize the DMA attributes */
360 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
361 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
362 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
363
364 /* 32 bit addressing */
365 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
366
367 /* Byte alignment */
368 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
369
	/*
	 * Since the PCI specification requires only byte alignment, the
	 * burst size field should be set to 1 for PCI devices.
	 */
374 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
375
376 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
377 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
378 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
379 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
380 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
381 ehcip->ehci_dma_attr.dma_attr_flags = 0;
382 ehci_dma_attr_workaround(ehcip);
383 }
384
385
386 /*
387 * ehci_allocate_pools:
388 *
389 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
390 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
391 * to a 16 byte boundary.
392 */
393 int
394 ehci_allocate_pools(ehci_state_t *ehcip)
395 {
396 ddi_device_acc_attr_t dev_attr;
397 size_t real_length;
398 int result;
399 uint_t ccount;
400 int i;
401
402 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
403 "ehci_allocate_pools:");
404
405 /* The host controller will be little endian */
406 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
407 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
408 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
409
	/* Set the alignment required for the QTD/QH pools */
411 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
412
413 /* Allocate the QTD pool DMA handle */
414 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
415 DDI_DMA_SLEEP, 0,
416 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
417
418 goto failure;
419 }
420
421 /* Allocate the memory for the QTD pool */
422 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
423 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
424 &dev_attr,
425 DDI_DMA_CONSISTENT,
426 DDI_DMA_SLEEP,
427 0,
428 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
429 &real_length,
430 &ehcip->ehci_qtd_pool_mem_handle)) {
431
432 goto failure;
433 }
434
435 /* Map the QTD pool into the I/O address space */
436 result = ddi_dma_addr_bind_handle(
437 ehcip->ehci_qtd_pool_dma_handle,
438 NULL,
439 (caddr_t)ehcip->ehci_qtd_pool_addr,
440 real_length,
441 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
442 DDI_DMA_SLEEP,
443 NULL,
444 &ehcip->ehci_qtd_pool_cookie,
445 &ccount);
446
447 bzero((void *)ehcip->ehci_qtd_pool_addr,
448 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
449
450 /* Process the result */
451 if (result == DDI_DMA_MAPPED) {
452 /* The cookie count should be 1 */
453 if (ccount != 1) {
454 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
455 "ehci_allocate_pools: More than 1 cookie");
456
457 goto failure;
458 }
459 } else {
460 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
461 "ehci_allocate_pools: Result = %d", result);
462
463 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
464
465 goto failure;
466 }
467
468 /*
469 * DMA addresses for QTD pools are bound
470 */
471 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
472
473 /* Initialize the QTD pool */
474 for (i = 0; i < ehci_qtd_pool_size; i ++) {
475 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
476 qtd_state, EHCI_QTD_FREE);
477 }
478
	/* Allocate the QH pool DMA handle */
480 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
481 &ehcip->ehci_dma_attr,
482 DDI_DMA_SLEEP,
483 0,
484 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
485 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
486 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
487
488 goto failure;
489 }
490
491 /* Allocate the memory for the QH pool */
492 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
493 ehci_qh_pool_size * sizeof (ehci_qh_t),
494 &dev_attr,
495 DDI_DMA_CONSISTENT,
496 DDI_DMA_SLEEP,
497 0,
498 (caddr_t *)&ehcip->ehci_qh_pool_addr,
499 &real_length,
500 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
501 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
502 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
503
504 goto failure;
505 }
506
507 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
508 NULL,
509 (caddr_t)ehcip->ehci_qh_pool_addr,
510 real_length,
511 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
512 DDI_DMA_SLEEP,
513 NULL,
514 &ehcip->ehci_qh_pool_cookie,
515 &ccount);
516
517 bzero((void *)ehcip->ehci_qh_pool_addr,
518 ehci_qh_pool_size * sizeof (ehci_qh_t));
519
520 /* Process the result */
521 if (result == DDI_DMA_MAPPED) {
522 /* The cookie count should be 1 */
523 if (ccount != 1) {
524 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
525 "ehci_allocate_pools: More than 1 cookie");
526
527 goto failure;
528 }
529 } else {
530 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
531
532 goto failure;
533 }
534
535 /*
536 * DMA addresses for QH pools are bound
537 */
538 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
539
540 /* Initialize the QH pool */
541 for (i = 0; i < ehci_qh_pool_size; i ++) {
542 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
543 }
544
545 /* Byte alignment */
546 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
547
548 return (DDI_SUCCESS);
549
550 failure:
551 /* Byte alignment */
552 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
553
554 return (DDI_FAILURE);
555 }
556
557
558 /*
559 * ehci_decode_ddi_dma_addr_bind_handle_result:
560 *
561 * Process the return values of ddi_dma_addr_bind_handle()
562 */
563 void
564 ehci_decode_ddi_dma_addr_bind_handle_result(
565 ehci_state_t *ehcip,
566 int result)
567 {
568 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
569 "ehci_decode_ddi_dma_addr_bind_handle_result:");
570
571 switch (result) {
572 case DDI_DMA_PARTIAL_MAP:
573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
574 "Partial transfers not allowed");
575 break;
576 case DDI_DMA_INUSE:
577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
578 "Handle is in use");
579 break;
580 case DDI_DMA_NORESOURCES:
581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
582 "No resources");
583 break;
584 case DDI_DMA_NOMAPPING:
585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
586 "No mapping");
587 break;
588 case DDI_DMA_TOOBIG:
589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
590 "Object is too big");
591 break;
592 default:
593 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
594 "Unknown dma error");
595 }
596 }
597
598
599 /*
600 * ehci_map_regs:
601 *
 * The Host Controller (HC) contains a set of on-chip operational registers
 * which should be mapped into a non-cacheable portion of the system
 * addressable space.
605 */
606 int
607 ehci_map_regs(ehci_state_t *ehcip)
608 {
609 ddi_device_acc_attr_t attr;
610 uint16_t cmd_reg;
611 uint_t length;
612
613 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
614
615 /* Check to make sure we have memory access */
616 if (pci_config_setup(ehcip->ehci_dip,
617 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
618
619 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
620 "ehci_map_regs: Config error");
621
622 return (DDI_FAILURE);
623 }
624
625 /* Make sure Memory Access Enable is set */
626 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
627
628 if (!(cmd_reg & PCI_COMM_MAE)) {
629
630 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
631 "ehci_map_regs: Memory base address access disabled");
632
633 return (DDI_FAILURE);
634 }
635
636 /* The host controller will be little endian */
637 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
638 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
639 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
640
641 /* Map in EHCI Capability registers */
642 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
643 (caddr_t *)&ehcip->ehci_capsp, 0,
644 sizeof (ehci_caps_t), &attr,
645 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
646
647 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
648 "ehci_map_regs: Map setup error");
649
650 return (DDI_FAILURE);
651 }
652
653 length = ddi_get8(ehcip->ehci_caps_handle,
654 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
655
656 /* Free the original mapping */
657 ddi_regs_map_free(&ehcip->ehci_caps_handle);
658
659 /* Re-map in EHCI Capability and Operational registers */
660 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
661 (caddr_t *)&ehcip->ehci_capsp, 0,
662 length + sizeof (ehci_regs_t), &attr,
663 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
664
665 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
666 "ehci_map_regs: Map setup error");
667
668 return (DDI_FAILURE);
669 }
670
671 /* Get the pointer to EHCI Operational Register */
672 ehcip->ehci_regsp = (ehci_regs_t *)
673 ((uintptr_t)ehcip->ehci_capsp + length);
674
675 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
676 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
677 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
678
679 return (DDI_SUCCESS);
680 }
681
682 /*
683 * The following simulated polling is for debugging purposes only.
684 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
685 */
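
/*
 * For example (illustrative only), simulated polling can be enabled by
 * adding the following property to ehci.conf and rebooting:
 *
 *	usb-polling="true";
 */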
686 static int
687 ehci_is_polled(dev_info_t *dip)
688 {
689 int ret;
690 char *propval;
691
692 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
693 "usb-polling", &propval) != DDI_SUCCESS)
694
695 return (0);
696
697 ret = (strcmp(propval, "true") == 0);
698 ddi_prop_free(propval);
699
700 return (ret);
701 }
702
703 static void
704 ehci_poll_intr(void *arg)
705 {
706 /* poll every msec */
707 for (;;) {
708 (void) ehci_intr(arg, NULL);
709 delay(drv_usectohz(1000));
710 }
711 }
712
713 /*
714 * ehci_register_intrs_and_init_mutex:
715 *
 * Register interrupts and initialize the mutexes and condition variables.
717 */
718 int
719 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
720 {
721 int intr_types;
722
723 #if defined(__x86)
724 uint8_t iline;
725 #endif
726
727 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
728 "ehci_register_intrs_and_init_mutex:");
729
730 /*
731 * There is a known MSI hardware bug with the EHCI controller
	 * of the ULI1575 southbridge, so MSI is disabled for this chip.
733 */
734 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
735 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
736 ehcip->ehci_msi_enabled = B_FALSE;
737 } else {
738 /* Set the MSI enable flag from the global EHCI MSI tunable */
739 ehcip->ehci_msi_enabled = ehci_enable_msi;
740 }
741
742 /* launch polling thread instead of enabling pci interrupt */
743 if (ehci_is_polled(ehcip->ehci_dip)) {
744 extern pri_t maxclsyspri;
745
746 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
747 "ehci_register_intrs_and_init_mutex: "
748 "running in simulated polled mode");
749
750 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
751 TS_RUN, maxclsyspri);
752
753 return (DDI_SUCCESS);
754 }
755
756 #if defined(__x86)
	/*
	 * Make sure that the interrupt pin is connected to the
	 * interrupt controller on x86.  Interrupt line 255 means
	 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
	 * If we returned failure when the interrupt line equals 255,
	 * high speed devices would be routed to the companion host
	 * controllers.  However, it is not necessary to return failure
	 * here, and the ohci/uhci drivers don't check the interrupt
	 * line either.  It is still useful to log a message here for
	 * debugging purposes.
	 */
767 iline = pci_config_get8(ehcip->ehci_config_handle,
768 PCI_CONF_ILINE);
769
770 if (iline == 255) {
771 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
772 "ehci_register_intrs_and_init_mutex: "
773 "interrupt line value out of range (%d)",
774 iline);
775 }
776 #endif /* __x86 */
777
778 /* Get supported interrupt types */
779 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
780 &intr_types) != DDI_SUCCESS) {
781 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
782 "ehci_register_intrs_and_init_mutex: "
783 "ddi_intr_get_supported_types failed");
784
785 return (DDI_FAILURE);
786 }
787
788 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
789 "ehci_register_intrs_and_init_mutex: "
790 "supported interrupt types 0x%x", intr_types);
791
792 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
793 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
794 != DDI_SUCCESS) {
795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
796 "ehci_register_intrs_and_init_mutex: MSI "
797 "registration failed, trying FIXED interrupt \n");
798 } else {
799 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
800 "ehci_register_intrs_and_init_mutex: "
801 "Using MSI interrupt type\n");
802
803 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
804 ehcip->ehci_flags |= EHCI_INTR;
805 }
806 }
807
808 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
809 (intr_types & DDI_INTR_TYPE_FIXED)) {
810 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
811 != DDI_SUCCESS) {
812 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
813 "ehci_register_intrs_and_init_mutex: "
814 "FIXED interrupt registration failed\n");
815
816 return (DDI_FAILURE);
817 }
818
819 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
820 "ehci_register_intrs_and_init_mutex: "
821 "Using FIXED interrupt type\n");
822
823 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
824 ehcip->ehci_flags |= EHCI_INTR;
825 }
826
827 return (DDI_SUCCESS);
828 }
829
830
831 /*
832 * ehci_add_intrs:
833 *
834 * Register FIXED or MSI interrupts.
835 */
836 static int
837 ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
838 {
839 int actual, avail, intr_size, count = 0;
840 int i, flag, ret;
841
842 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
843 "ehci_add_intrs: interrupt type 0x%x", intr_type);
844
845 /* Get number of interrupts */
846 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
847 if ((ret != DDI_SUCCESS) || (count == 0)) {
848 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
849 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
850 "ret: %d, count: %d", ret, count);
851
852 return (DDI_FAILURE);
853 }
854
855 /* Get number of available interrupts */
856 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
857 if ((ret != DDI_SUCCESS) || (avail == 0)) {
858 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
859 "ehci_add_intrs: ddi_intr_get_navail() failure, "
860 "ret: %d, count: %d", ret, count);
861
862 return (DDI_FAILURE);
863 }
864
865 if (avail < count) {
866 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
		    "ehci_add_intrs: nintrs() "
868 "returned %d, navail returned %d\n", count, avail);
869 }
870
871 /* Allocate an array of interrupt handles */
872 intr_size = count * sizeof (ddi_intr_handle_t);
873 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
874
875 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
876 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
877
878 /* call ddi_intr_alloc() */
879 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
880 intr_type, 0, count, &actual, flag);
881
882 if ((ret != DDI_SUCCESS) || (actual == 0)) {
883 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
884 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
885
886 kmem_free(ehcip->ehci_htable, intr_size);
887
888 return (DDI_FAILURE);
889 }
890
891 if (actual < count) {
892 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
893 "ehci_add_intrs: Requested: %d, Received: %d\n",
894 count, actual);
895
896 for (i = 0; i < actual; i++)
897 (void) ddi_intr_free(ehcip->ehci_htable[i]);
898
899 kmem_free(ehcip->ehci_htable, intr_size);
900
901 return (DDI_FAILURE);
902 }
903
904 ehcip->ehci_intr_cnt = actual;
905
906 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
907 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
908 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
909 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
910
911 for (i = 0; i < actual; i++)
912 (void) ddi_intr_free(ehcip->ehci_htable[i]);
913
914 kmem_free(ehcip->ehci_htable, intr_size);
915
916 return (DDI_FAILURE);
917 }
918
919 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
920 "ehci_add_intrs: Supported Interrupt priority 0x%x",
921 ehcip->ehci_intr_pri);
922
923 /* Test for high level mutex */
924 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
925 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
926 "ehci_add_intrs: Hi level interrupt not supported");
927
928 for (i = 0; i < actual; i++)
929 (void) ddi_intr_free(ehcip->ehci_htable[i]);
930
931 kmem_free(ehcip->ehci_htable, intr_size);
932
933 return (DDI_FAILURE);
934 }
935
936 /* Initialize the mutex */
937 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
938 DDI_INTR_PRI(ehcip->ehci_intr_pri));
939
940 /* Call ddi_intr_add_handler() */
941 for (i = 0; i < actual; i++) {
942 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
943 ehci_intr, (caddr_t)ehcip,
944 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
945 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
946 "ehci_add_intrs:ddi_intr_add_handler() "
947 "failed %d", ret);
948
949 for (i = 0; i < actual; i++)
950 (void) ddi_intr_free(ehcip->ehci_htable[i]);
951
952 mutex_destroy(&ehcip->ehci_int_mutex);
953 kmem_free(ehcip->ehci_htable, intr_size);
954
955 return (DDI_FAILURE);
956 }
957 }
958
959 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
960 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
961 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
962 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
963
964 for (i = 0; i < actual; i++) {
965 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
966 (void) ddi_intr_free(ehcip->ehci_htable[i]);
967 }
968
969 mutex_destroy(&ehcip->ehci_int_mutex);
970 kmem_free(ehcip->ehci_htable, intr_size);
971
972 return (DDI_FAILURE);
973 }
974
975 /* Enable all interrupts */
976 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
977 /* Call ddi_intr_block_enable() for MSI interrupts */
978 (void) ddi_intr_block_enable(ehcip->ehci_htable,
979 ehcip->ehci_intr_cnt);
980 } else {
981 /* Call ddi_intr_enable for MSI or FIXED interrupts */
982 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
983 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
984 }
985
986 return (DDI_SUCCESS);
987 }
988
989
990 /*
 * ehci_init_hardware:
 *
 * Take control from the BIOS, reset the EHCI host controller, and check
 * the version, etc.
994 */
995 int
996 ehci_init_hardware(ehci_state_t *ehcip)
997 {
998 int revision;
999 uint16_t cmd_reg;
1000 int abort_on_BIOS_take_over_failure;
1001
1002 /* Take control from the BIOS */
1003 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1004
1005 /* read .conf file properties */
1006 abort_on_BIOS_take_over_failure =
1007 ddi_prop_get_int(DDI_DEV_T_ANY,
1008 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1009 "abort-on-BIOS-take-over-failure", 0);
1010
1011 if (abort_on_BIOS_take_over_failure) {
1012
1013 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1014 "Unable to take control from BIOS.");
1015
1016 return (DDI_FAILURE);
1017 }
1018
1019 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1020 "Unable to take control from BIOS. Failure is ignored.");
1021 }
1022
	/* Enable PCI Memory Access and Bus Mastering */
1024 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1025 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1026 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1027
1028 /* Reset the EHCI host controller */
1029 Set_OpReg(ehci_command,
1030 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1031
1032 /* Wait 10ms for reset to complete */
1033 drv_usecwait(EHCI_RESET_TIMEWAIT);
1034
1035 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1036
1037 /* Verify the version number */
1038 revision = Get_16Cap(ehci_version);
1039
1040 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1041 "ehci_init_hardware: Revision 0x%x", revision);
1042
1043 /*
	 * The EHCI driver supports host controllers compliant with
	 * revision 0.95 and higher of the EHCI specification.
1046 */
1047 if (revision < EHCI_REVISION_0_95) {
1048
1049 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1050 "Revision 0x%x is not supported", revision);
1051
1052 return (DDI_FAILURE);
1053 }
1054
1055 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1056
1057 /* Initialize the Frame list base address area */
1058 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1059
1060 return (DDI_FAILURE);
1061 }
1062
1063 /*
1064 * For performance reasons, do not insert anything into the
1065 * asynchronous list or activate the asynch list schedule until
1066 * there is a valid QH.
1067 */
1068 ehcip->ehci_head_of_async_sched_list = NULL;
1069
1070 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1071 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1072 /*
1073 * The driver is unable to reliably stop the asynch
1074 * list schedule on VIA VT6202 controllers, so we
1075 * always keep a dummy QH on the list.
1076 */
1077 ehci_qh_t *dummy_async_qh =
1078 ehci_alloc_qh(ehcip, NULL, NULL);
1079
1080 Set_QH(dummy_async_qh->qh_link_ptr,
1081 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1082 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1083
1084 /* Set this QH to be the "head" of the circular list */
1085 Set_QH(dummy_async_qh->qh_ctrl,
1086 Get_QH(dummy_async_qh->qh_ctrl) |
1087 EHCI_QH_CTRL_RECLAIM_HEAD);
1088
1089 Set_QH(dummy_async_qh->qh_next_qtd,
1090 EHCI_QH_NEXT_QTD_PTR_VALID);
1091 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1092 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1093
1094 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1095 ehcip->ehci_open_async_count++;
1096 ehcip->ehci_async_req_count++;
1097 }
1098 }
1099
1100 return (DDI_SUCCESS);
1101 }
1102
1103
1104 /*
 * ehci_init_workaround:
 *
 * Apply some hardware workarounds while initializing ehci.
1108 */
1109 int
1110 ehci_init_workaround(ehci_state_t *ehcip)
1111 {
1112 /*
	 * The Acer Labs Inc. M5273 EHCI controller does not send
	 * interrupts unless the Root hub ports are routed to the EHCI
	 * host controller; so route the ports now, before we test for
	 * the presence of SOF interrupts.
1117 */
1118 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1119 /* Route all Root hub ports to EHCI host controller */
1120 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1121 }
1122
1123 /*
1124 * VIA chips have some issues and may not work reliably.
1125 * Revisions >= 0x80 are part of a southbridge and appear
1126 * to be reliable with the workaround.
	 * For revisions < 0x80, complain if we were bound using the
	 * PCI class code, otherwise proceed.  This allows the user to
	 * bind ehci specifically to this chip and not get the
	 * warnings.
1131 */
1132 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1133
1134 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1135
1136 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1137 "ehci_init_workaround: Applying VIA workarounds "
1138 "for the 6212 chip.");
1139
1140 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1141 "pciclass,0c0320") == 0) {
1142
1143 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1144 "Due to recently discovered incompatibilities");
1145 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1146 "with this USB controller, USB2.x transfer");
1147 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1148 "support has been disabled. This device will");
1149 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1150 "continue to function as a USB1.x controller.");
1151 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1152 "If you are interested in enabling USB2.x");
1153 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1154 "support please, refer to the ehci(7D) man page.");
1155 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1156 "Please also refer to www.sun.com/io for");
1157 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 "Solaris Ready products and to");
1159 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1160 "www.sun.com/bigadmin/hcl for additional");
1161 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1162 "compatible USB products.");
1163
1164 return (DDI_FAILURE);
1165
1166 } else if (ehci_vt62x2_workaround) {
1167
1168 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1169 "Applying VIA workarounds");
1170 }
1171 }
1172
1173 return (DDI_SUCCESS);
1174 }
1175
1176 /*
1177 * ehci_init_ctlr:
1178 *
1179 * Initialize the Host Controller (HC).
1180 */
1181 int
1182 ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1183 {
1184 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1185
1186 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1187
1188 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1189
1190 return (DDI_FAILURE);
1191 }
1192 }
1193
1194 /*
	 * Check for the asynchronous schedule park capability. If this
	 * feature is supported, program the ehci command register with
	 * the appropriate values.
1198 */
1199 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1200
1201 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1202 "ehci_init_ctlr: Async park mode is supported");
1203
1204 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1205 (EHCI_CMD_ASYNC_PARK_ENABLE |
1206 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1207 }
1208
1209 /*
	 * Check for the programmable periodic frame list feature. If this
	 * feature is supported, program the ehci command register for a
	 * 1024-element frame list.
1213 */
1214 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1215
1216 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1217 "ehci_init_ctlr: Variable programmable periodic "
1218 "frame list is supported");
1219
1220 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1221 EHCI_CMD_FRAME_1024_SIZE));
1222 }
1223
1224 /*
	 * The EHCI driver currently doesn't support 64 bit addressing.
	 *
	 * If the 64 bit addressing capability were used, the
	 * ehci_ctrl_segment register would be programmed with the 4
	 * Gigabyte segment where all of the interface data structures
	 * are allocated.
1230 */
1231 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1232
1233 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1234 "ehci_init_ctlr: EHCI driver doesn't support "
1235 "64 bit addressing");
1236 }
1237
	/* 64 bit addressing is not supported */
1239 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1240
1241 /* Turn on/off the schedulers */
1242 ehci_toggle_scheduler(ehcip);
1243
1244 /* Set host controller soft state to operational */
1245 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1246
1247 /*
1248 * Set the Periodic Frame List Base Address register with the
1249 * starting physical address of the Periodic Frame List.
1250 */
1251 Set_OpReg(ehci_periodic_list_base,
1252 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1253 EHCI_PERIODIC_LIST_BASE));
1254
1255 /*
1256 * Set ehci_interrupt to enable all interrupts except Root
1257 * Hub Status change interrupt.
1258 */
1259 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1260 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1261 EHCI_INTR_USB);
1262
1263 /*
	 * Set the desired interrupt threshold and turn on the EHCI
	 * host controller.
1265 */
1266 Set_OpReg(ehci_command,
1267 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1268 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1269
1270 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1271
1272 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1273 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1274
1275 /* Set host controller soft state to error */
1276 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1277
1278 return (DDI_FAILURE);
1279 }
1280 }
1281
1282 /* Route all Root hub ports to EHCI host controller */
1283 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1284
1285 return (DDI_SUCCESS);
1286 }
1287
1288 /*
1289 * ehci_take_control:
1290 *
 * Handshake to take EHCI control from the BIOS if necessary. This is only
 * relevant on x86 machines, because sparc doesn't have a BIOS.
 * On x86 machines, the take-control process includes:
 * o get the base address of the extended capability list
 * o find the capability for handoff synchronization in the list.
 * o check if the BIOS owns the host controller.
 * o set the OS Owned semaphore bit, asking the BIOS to release ownership.
 * o wait for a fixed time and check if the BIOS has relinquished control.
1299 */
1300 /* ARGSUSED */
1301 static int
1302 ehci_take_control(ehci_state_t *ehcip)
1303 {
1304 #if defined(__x86)
1305 uint32_t extended_cap;
1306 uint32_t extended_cap_offset;
1307 uint32_t extended_cap_id;
1308 uint_t retry;
1309
1310 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1311 "ehci_take_control:");
1312
1313 /*
	 * According to EHCI Spec section 2.2.4, get the EECP base address
	 * from the HCCPARAMS register.
1316 */
1317 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1318 EHCI_HCC_EECP_SHIFT;
1319
1320 /*
	 * According to EHCI Spec section 2.2.4, an extended capability
	 * offset of less than 40h is not valid. This means we don't need
	 * to worry about BIOS handoff.
1324 */
1325 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1326
1327 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1328 "ehci_take_control: Hardware doesn't support legacy.");
1329
1330 goto success;
1331 }
1332
1333 /*
	 * According to EHCI Spec section 2.1.7, a zero offset indicates the
1335 * end of the extended capability list.
1336 */
1337 while (extended_cap_offset) {
1338
1339 /* Get the extended capability value. */
1340 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1341 extended_cap_offset);
1342
1343 /*
1344 * It's possible that we'll receive an invalid PCI read here due
1345 * to something going wrong due to platform firmware. This has
1346 * been observed in the wild depending on the version of ACPI in
1347 * use. If this happens, we'll assume that the capability does
1348 * not exist and that we do not need to take control from the
1349 * BIOS.
1350 */
1351 if (extended_cap == PCI_EINVAL32) {
1352 extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1353 break;
1354 }
1355
1356 /* Get the capability ID */
1357 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1358 EHCI_EX_CAP_ID_SHIFT;
1359
		/* Check if the controller supports the legacy capability */
1361 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1362 break;
1363 }
1364
1365 /* Get the offset of the next capability */
1366 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1367 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1368
1369 }
1370
1371 /*
	 * Unable to find legacy support in the hardware's extended
	 * capability list. This means we don't need to worry about
	 * BIOS handoff.
1374 */
1375 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1376
1377 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1378 "ehci_take_control: Hardware doesn't support legacy");
1379
1380 goto success;
1381 }
1382
	/* Check if the BIOS currently owns the controller. */
1384 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1385
1386 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1387 "ehci_take_control: BIOS does not own EHCI");
1388
1389 goto success;
1390 }
1391
1392 /*
	 * According to EHCI Spec section 5.1, the OS driver initiates an
	 * ownership request by setting the OS Owned semaphore to one. The
	 * OS waits for the BIOS Owned bit to go to zero before attempting
	 * to use the EHCI controller. The time that the OS must wait for
	 * the BIOS to respond to the ownership request is beyond the
	 * scope of the specification.
	 * We wait up to EHCI_TAKEOVER_WAIT_COUNT * EHCI_TAKEOVER_DELAY
	 * for the BIOS to release ownership.
1401 */
1402 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1403 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1404 extended_cap);
1405
1406 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1407
		/* wait for an interval before checking again */
1409 #ifndef __lock_lint
1410 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1411 #endif
1412 /* Check to see if the BIOS has released the ownership */
1413 extended_cap = pci_config_get32(
1414 ehcip->ehci_config_handle, extended_cap_offset);
1415
1416 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1417
1418 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1419 ehcip->ehci_log_hdl,
1420 "ehci_take_control: BIOS has released "
1421 "the ownership. retry = %d", retry);
1422
1423 goto success;
1424 }
1425
1426 }
1427
1428 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1429 "ehci_take_control: take control from BIOS failed.");
1430
1431 return (USB_FAILURE);
1432
1433 success:
1434
1435 #endif /* __x86 */
1436 return (USB_SUCCESS);
1437 }
1438
1439
1440 /*
 * ehci_init_periodic_frame_lst_table:
 *
 * Allocate the system memory and initialize the Host Controller
 * Periodic Frame List table area. The start of the Periodic
 * Frame List Table area must be 4096 byte aligned.
1446 */
1447 static int
1448 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1449 {
1450 ddi_device_acc_attr_t dev_attr;
1451 size_t real_length;
1452 uint_t ccount;
1453 int result;
1454
1455 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1456
1457 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1458 "ehci_init_periodic_frame_lst_table:");
1459
1460 /* The host controller will be little endian */
1461 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1462 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1463 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1464
1465 /* Force the required 4K restrictive alignment */
1466 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1467
1468 /* Create space for the Periodic Frame List */
1469 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1470 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1471
1472 goto failure;
1473 }
1474
1475 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1476 sizeof (ehci_periodic_frame_list_t),
1477 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1478 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1479 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1480
1481 goto failure;
1482 }
1483
1484 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1485 "ehci_init_periodic_frame_lst_table: "
1486 "Real length %lu", real_length);
1487
1488 /* Map the whole Periodic Frame List into the I/O address space */
1489 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1490 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1491 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1492 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1493
1494 if (result == DDI_DMA_MAPPED) {
1495 /* The cookie count should be 1 */
1496 if (ccount != 1) {
1497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1498 "ehci_init_periodic_frame_lst_table: "
1499 "More than 1 cookie");
1500
1501 goto failure;
1502 }
1503 } else {
1504 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1505
1506 goto failure;
1507 }
1508
1509 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1510 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1511 (void *)ehcip->ehci_periodic_frame_list_tablep,
1512 ehcip->ehci_pflt_cookie.dmac_address);
1513
1514 /*
1515 * DMA addresses for Periodic Frame List are bound.
1516 */
1517 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1518
1519 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1520
1521 /* Initialize the Periodic Frame List */
1522 ehci_build_interrupt_lattice(ehcip);
1523
1524 /* Reset Byte Alignment to Default */
1525 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1526
1527 return (DDI_SUCCESS);
1528 failure:
1529 /* Byte alignment */
1530 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1531
1532 return (DDI_FAILURE);
1533 }
1534
1535
1536 /*
1537 * ehci_build_interrupt_lattice:
1538 *
1539 * Construct the interrupt lattice tree using static Endpoint Descriptors
 * (QH). This interrupt lattice tree will have a total of 32 interrupt QH
 * lists and the Host Controller (HC) processes one interrupt QH list in
 * every frame. The Host Controller traverses the periodic schedule by
 * constructing an array offset reference from the Periodic List Base Address
 * register and bits 12 to 3 of the Frame Index register. It fetches the
 * element and begins traversing the graph of linked schedule data structures.
1546 */
1547 static void
1548 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1549 {
1550 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1551 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1552 ehci_periodic_frame_list_t *periodic_frame_list =
1553 ehcip->ehci_periodic_frame_list_tablep;
1554 ushort_t *temp, num_of_nodes;
1555 uintptr_t addr;
1556 int i, j, k;
1557
1558 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1559 "ehci_build_interrupt_lattice:");
1560
1561 /*
1562 * Reserve the first 63 Endpoint Descriptor (QH) structures
	 * in the pool as static endpoints; these are required for
	 * constructing the interrupt lattice tree.
1565 */
1566 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1567 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1568 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1569 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1570 Set_QH(list_array[i].qh_alt_next_qtd,
1571 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1572 }
1573
1574 /*
	 * Make sure that the last endpoint on the periodic frame list
	 * terminates the periodic schedule.
1577 */
1578 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1579
1580 /* Build the interrupt lattice tree */
1581 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1582 /*
1583 * The next pointer in the host controller endpoint
1584 * descriptor must contain an iommu address. Calculate
1585 * the offset into the cpu address and add this to the
1586 * starting iommu address.
1587 */
1588 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1589
1590 Set_QH(list_array[2*i + 1].qh_link_ptr,
1591 addr | EHCI_QH_LINK_REF_QH);
1592 Set_QH(list_array[2*i + 2].qh_link_ptr,
1593 addr | EHCI_QH_LINK_REF_QH);
1594 }
1595
1596 /* Build the tree bottom */
1597 temp = (unsigned short *)
1598 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1599
1600 num_of_nodes = 1;
1601
1602 /*
	 * Initialize the values used to set up the head pointers for the
	 * 32ms scheduling lists, which start from the Periodic Frame
	 * List.
1606 */
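	/*
	 * The loop below enumerates the frame list indices in bit-reversed
	 * order.  For illustration, with 8 lists the successive passes
	 * would yield {0, 1}, then {0, 2, 1, 3} and finally
	 * {0, 4, 2, 6, 1, 5, 3, 7}, so that frames which map to the same
	 * lattice leaf end up grouped together in ehci_index[].
	 */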
1607 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1608 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1609 ehci_index[j++] = temp[k];
1610 ehci_index[j] = temp[k] + ehci_pow_2(i);
1611 }
1612
1613 num_of_nodes *= 2;
1614 for (k = 0; k < num_of_nodes; k++)
1615 temp[k] = ehci_index[k];
1616 }
1617
1618 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1619
1620 /*
1621 * Initialize the interrupt list in the Periodic Frame List Table
1622 * so that it points to the bottom of the tree.
1623 */
1624 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1625 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1626 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1627
1628 ASSERT(addr);
1629
1630 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1631 Set_PFLT(periodic_frame_list->
1632 ehci_periodic_frame_list_table[ehci_index[j++]],
1633 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1634 }
1635 }
1636 }
1637
1638
1639 /*
1640 * ehci_alloc_hcdi_ops:
1641 *
1642 * The HCDI interfaces or entry points are the software interfaces used by
1643 * the Universal Serial Bus Driver (USBA) to access the services of the
1644 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1645 * about all available HCDI interfaces or entry points.
1646 */
1647 usba_hcdi_ops_t *
1648 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1649 {
1650 usba_hcdi_ops_t *usba_hcdi_ops;
1651
1652 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1653 "ehci_alloc_hcdi_ops:");
1654
1655 usba_hcdi_ops = usba_alloc_hcdi_ops();
1656
1657 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1658
1659 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1660 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1661 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1662
1663 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1664 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1665 ehci_hcdi_pipe_reset_data_toggle;
1666
1667 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1668 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1669 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1670 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1671
1672 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1673 ehci_hcdi_bulk_transfer_size;
1674
1675 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1676 ehci_hcdi_pipe_stop_intr_polling;
1677 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1678 ehci_hcdi_pipe_stop_isoc_polling;
1679
1680 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1681 ehci_hcdi_get_current_frame_number;
1682 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1683 ehci_hcdi_get_max_isoc_pkts;
1684
1685 usba_hcdi_ops->usba_hcdi_console_input_init =
1686 ehci_hcdi_polled_input_init;
1687 usba_hcdi_ops->usba_hcdi_console_input_enter =
1688 ehci_hcdi_polled_input_enter;
1689 usba_hcdi_ops->usba_hcdi_console_read =
1690 ehci_hcdi_polled_read;
1691 usba_hcdi_ops->usba_hcdi_console_input_exit =
1692 ehci_hcdi_polled_input_exit;
1693 usba_hcdi_ops->usba_hcdi_console_input_fini =
1694 ehci_hcdi_polled_input_fini;
1695
1696 usba_hcdi_ops->usba_hcdi_console_output_init =
1697 ehci_hcdi_polled_output_init;
1698 usba_hcdi_ops->usba_hcdi_console_output_enter =
1699 ehci_hcdi_polled_output_enter;
1700 usba_hcdi_ops->usba_hcdi_console_write =
1701 ehci_hcdi_polled_write;
1702 usba_hcdi_ops->usba_hcdi_console_output_exit =
1703 ehci_hcdi_polled_output_exit;
1704 usba_hcdi_ops->usba_hcdi_console_output_fini =
1705 ehci_hcdi_polled_output_fini;
1706 return (usba_hcdi_ops);
1707 }
1708
1709
1710 /*
1711 * Host Controller Driver (HCD) deinitialization functions
1712 */
1713
1714 /*
1715 * ehci_cleanup:
1716 *
1717 * Cleanup on attach failure or detach
1718 */
1719 int
1720 ehci_cleanup(ehci_state_t *ehcip)
1721 {
1722 ehci_trans_wrapper_t *tw;
1723 ehci_pipe_private_t *pp;
1724 ehci_qtd_t *qtd;
1725 int i, ctrl, rval;
1726 int flags = ehcip->ehci_flags;
1727
1728 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1729
1730 if (flags & EHCI_RHREG) {
1731 /* Unload the root hub driver */
1732 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1733
1734 return (DDI_FAILURE);
1735 }
1736 }
1737
1738 if (flags & EHCI_USBAREG) {
1739 /* Unregister this HCD instance with USBA */
1740 usba_hcdi_unregister(ehcip->ehci_dip);
1741 }
1742
1743 if (flags & EHCI_INTR) {
1744
1745 mutex_enter(&ehcip->ehci_int_mutex);
1746
1747 /* Disable all EHCI QH list processing */
1748 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1749 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1750 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1751
1752 /* Disable all EHCI interrupts */
1753 Set_OpReg(ehci_interrupt, 0);
1754
1755 /* wait for the next SOF */
1756 (void) ehci_wait_for_sof(ehcip);
1757
1758 /* Route all Root hub ports to Classic host controller */
1759 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1760
1761 /* Stop the EHCI host controller */
1762 Set_OpReg(ehci_command,
1763 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1764
1765 mutex_exit(&ehcip->ehci_int_mutex);
1766
	/* Wait for some time */
1768 delay(drv_usectohz(EHCI_TIMEWAIT));
1769
1770 ehci_rem_intrs(ehcip);
1771 }
1772
1773 /* Unmap the EHCI registers */
1774 if (ehcip->ehci_caps_handle) {
1775 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1776 }
1777
1778 if (ehcip->ehci_config_handle) {
1779 pci_config_teardown(&ehcip->ehci_config_handle);
1780 }
1781
1782 /* Free all the buffers */
1783 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1784 for (i = 0; i < ehci_qtd_pool_size; i ++) {
1785 qtd = &ehcip->ehci_qtd_pool_addr[i];
1786 ctrl = Get_QTD(ehcip->
1787 ehci_qtd_pool_addr[i].qtd_state);
1788
1789 if ((ctrl != EHCI_QTD_FREE) &&
1790 (ctrl != EHCI_QTD_DUMMY) &&
1791 (qtd->qtd_trans_wrapper)) {
1792
1793 mutex_enter(&ehcip->ehci_int_mutex);
1794
1795 tw = (ehci_trans_wrapper_t *)
1796 EHCI_LOOKUP_ID((uint32_t)
1797 Get_QTD(qtd->qtd_trans_wrapper));
1798
1799 /* Obtain the pipe private structure */
1800 pp = tw->tw_pipe_private;
1801
			/* Stop the transfer timer */
1803 ehci_stop_xfer_timer(ehcip, tw,
1804 EHCI_REMOVE_XFER_ALWAYS);
1805
1806 ehci_deallocate_tw(ehcip, pp, tw);
1807
1808 mutex_exit(&ehcip->ehci_int_mutex);
1809 }
1810 }
1811
1812 /*
1813 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1814 * the handle for QTD pools.
1815 */
1816 if ((ehcip->ehci_dma_addr_bind_flag &
1817 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1818
1819 rval = ddi_dma_unbind_handle(
1820 ehcip->ehci_qtd_pool_dma_handle);
1821
1822 ASSERT(rval == DDI_SUCCESS);
1823 }
1824 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1825 }
1826
1827 /* Free the QTD pool */
1828 if (ehcip->ehci_qtd_pool_dma_handle) {
1829 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1830 }
1831
1832 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1833 /*
1834 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1835 * the handle for QH pools.
1836 */
1837 if ((ehcip->ehci_dma_addr_bind_flag &
1838 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1839
1840 rval = ddi_dma_unbind_handle(
1841 ehcip->ehci_qh_pool_dma_handle);
1842
1843 ASSERT(rval == DDI_SUCCESS);
1844 }
1845
1846 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1847 }
1848
1849 /* Free the QH pool */
1850 if (ehcip->ehci_qh_pool_dma_handle) {
1851 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1852 }
1853
1854 /* Free the Periodic frame list table (PFLT) area */
1855 if (ehcip->ehci_periodic_frame_list_tablep &&
1856 ehcip->ehci_pflt_mem_handle) {
1857 /*
1858 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1859 * the handle for PFLT.
1860 */
1861 if ((ehcip->ehci_dma_addr_bind_flag &
1862 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1863
1864 rval = ddi_dma_unbind_handle(
1865 ehcip->ehci_pflt_dma_handle);
1866
1867 ASSERT(rval == DDI_SUCCESS);
1868 }
1869
1870 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1871 }
1872
1873 (void) ehci_isoc_cleanup(ehcip);
1874
1875 if (ehcip->ehci_pflt_dma_handle) {
1876 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1877 }
1878
1879 if (flags & EHCI_INTR) {
1880 /* Destroy the mutex */
1881 mutex_destroy(&ehcip->ehci_int_mutex);
1882 }
1883
1884 /* clean up kstat structs */
1885 ehci_destroy_stats(ehcip);
1886
1887 /* Free ehci hcdi ops */
1888 if (ehcip->ehci_hcdi_ops) {
1889 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1890 }
1891
1892 if (flags & EHCI_ZALLOC) {
1893
1894 usb_free_log_hdl(ehcip->ehci_log_hdl);
1895
1896 /* Remove all properties that might have been created */
1897 ddi_prop_remove_all(ehcip->ehci_dip);
1898
1899 /* Free the soft state */
1900 ddi_soft_state_free(ehci_statep,
1901 ddi_get_instance(ehcip->ehci_dip));
1902 }
1903
1904 return (DDI_SUCCESS);
1905 }
1906
1907
1908 /*
1909 * ehci_rem_intrs:
1910 *
1911 * Unregister FIXED or MSI interrupts
1912 */
1913 static void
1914 ehci_rem_intrs(ehci_state_t *ehcip)
1915 {
1916 int i;
1917
1918 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1919 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1920
1921 /* Disable all interrupts */
1922 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1923 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1924 ehcip->ehci_intr_cnt);
1925 } else {
1926 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1927 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1928 }
1929 }
1930
1931 /* Call ddi_intr_remove_handler() */
1932 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1933 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
1934 (void) ddi_intr_free(ehcip->ehci_htable[i]);
1935 }
1936
1937 kmem_free(ehcip->ehci_htable,
1938 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
1939 }
1940
1941
1942 /*
1943 * ehci_cpr_suspend
1944 */
1945 int
1946 ehci_cpr_suspend(ehci_state_t *ehcip)
1947 {
1948 int i;
1949
1950 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1951 "ehci_cpr_suspend:");
1952
1953 /* Call into the root hub and suspend it */
1954 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
1955
1956 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1957 "ehci_cpr_suspend: root hub fails to suspend");
1958
1959 return (DDI_FAILURE);
1960 }
1961
1962 /* Only root hub's intr pipe should be open at this time */
1963 mutex_enter(&ehcip->ehci_int_mutex);
1964
1965 ASSERT(ehcip->ehci_open_pipe_count == 0);
1966
1967 /* Just wait till all resources are reclaimed */
1968 i = 0;
1969 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
1970 ehci_handle_endpoint_reclaimation(ehcip);
1971 (void) ehci_wait_for_sof(ehcip);
1972 }
1973 ASSERT(ehcip->ehci_reclaim_list == NULL);
1974
1975 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1976 "ehci_cpr_suspend: Disable HC QH list processing");
1977
1978 /* Disable all EHCI QH list processing */
1979 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1980 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1981
1982 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1983 "ehci_cpr_suspend: Disable HC interrupts");
1984
1985 /* Disable all EHCI interrupts */
1986 Set_OpReg(ehci_interrupt, 0);
1987
1988 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1989 "ehci_cpr_suspend: Wait for the next SOF");
1990
1991 /* Wait for the next SOF */
1992 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
1993
1994 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1995 "ehci_cpr_suspend: ehci host controller suspend failed");
1996
1997 mutex_exit(&ehcip->ehci_int_mutex);
1998 return (DDI_FAILURE);
1999 }
2000
2001 /*
	 * Stop the ehci host controller if no usb keyboard is
	 * connected for polled input, or if ehci is forced off.
2004 */
2005 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2006 Set_OpReg(ehci_command,
2007 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2008
2009 }
2010
2011 /* Set host controller soft state to suspend */
2012 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2013
2014 mutex_exit(&ehcip->ehci_int_mutex);
2015
2016 return (DDI_SUCCESS);
2017 }
2018
2019
2020 /*
2021 * ehci_cpr_resume
2022 */
2023 int
2024 ehci_cpr_resume(ehci_state_t *ehcip)
2025 {
2026 mutex_enter(&ehcip->ehci_int_mutex);
2027
2028 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2029 "ehci_cpr_resume: Restart the controller");
2030
2031 /* Cleanup ehci specific information across cpr */
2032 ehci_cpr_cleanup(ehcip);
2033
2034 /* Restart the controller */
2035 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2036
2037 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2038 "ehci_cpr_resume: ehci host controller resume failed ");
2039
2040 mutex_exit(&ehcip->ehci_int_mutex);
2041
2042 return (DDI_FAILURE);
2043 }
2044
2045 mutex_exit(&ehcip->ehci_int_mutex);
2046
2047 /* Now resume the root hub */
2048 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2049
2050 return (DDI_FAILURE);
2051 }
2052
2053 return (DDI_SUCCESS);
2054 }
2055
2056
2057 /*
2058 * Bandwidth Allocation functions
2059 */
2060
2061 /*
2062 * ehci_allocate_bandwidth:
2063 *
2064 * Figure out whether or not this interval may be supported. Return the index
2065 * into the lattice if it can be supported. Return allocation failure if it
 * cannot be supported.
2067 */
2068 int
2069 ehci_allocate_bandwidth(
2070 ehci_state_t *ehcip,
2071 usba_pipe_handle_data_t *ph,
2072 uint_t *pnode,
2073 uchar_t *smask,
2074 uchar_t *cmask)
2075 {
2076 int error = USB_SUCCESS;
2077
2078 /* This routine is protected by the ehci_int_mutex */
2079 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2080
2081 /* Reset the pnode to the last checked pnode */
2082 *pnode = 0;
2083
2084 /* Allocate high speed bandwidth */
2085 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2086 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2087
2088 return (error);
2089 }
2090
2091 /*
2092 * For low/full speed usb devices, allocate classic TT bandwidth
	 * in addition to high speed bandwidth.
2094 */
2095 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2096
2097 /* Allocate classic TT bandwidth */
2098 if ((error = ehci_allocate_classic_tt_bandwidth(
2099 ehcip, ph, *pnode)) != USB_SUCCESS) {
2100
2101 /* Deallocate high speed bandwidth */
2102 ehci_deallocate_high_speed_bandwidth(
2103 ehcip, ph, *pnode, *smask, *cmask);
2104 }
2105 }
2106
2107 return (error);
2108 }
2109
2110
2111 /*
2112 * ehci_allocate_high_speed_bandwidth:
2113 *
2114 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2115 * isochronous endpoints.
2116 */
2117 static int
2118 ehci_allocate_high_speed_bandwidth(
2119 ehci_state_t *ehcip,
2120 usba_pipe_handle_data_t *ph,
2121 uint_t *pnode,
2122 uchar_t *smask,
2123 uchar_t *cmask)
2124 {
2125 uint_t sbandwidth, cbandwidth;
2126 int interval;
2127 usb_ep_descr_t *endpoint = &ph->p_ep;
2128 usba_device_t *child_ud;
2129 usb_port_status_t port_status;
2130 int error;
2131
2132 /* This routine is protected by the ehci_int_mutex */
2133 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2134
2135 /* Get child's usba device structure */
2136 child_ud = ph->p_usba_device;
2137
2138 mutex_enter(&child_ud->usb_mutex);
2139
2140 /* Get the current usb device's port status */
2141 port_status = ph->p_usba_device->usb_port_status;
2142
2143 mutex_exit(&child_ud->usb_mutex);
2144
2145 /*
2146 * Calculate the length in bytes of a transaction on this
2147 * periodic endpoint. Return failure if maximum packet is
2148 * zero.
2149 */
2150 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2151 port_status, &sbandwidth, &cbandwidth);
2152 if (error != USB_SUCCESS) {
2153
2154 return (error);
2155 }
2156
2157 /*
2158 * Adjust polling interval to be a power of 2.
2159 * If this interval can't be supported, return
2160 * allocation failure.
2161 */
2162 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2163 if (interval == USB_FAILURE) {
2164
2165 return (USB_FAILURE);
2166 }
2167
2168 if (port_status == USBA_HIGH_SPEED_DEV) {
2169 /* Allocate bandwidth for high speed devices */
2170 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2171 USB_EP_ATTR_ISOCH) {
2172 error = USB_SUCCESS;
2173 } else {
2174
2175 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2176 endpoint, sbandwidth, interval);
2177 }
2178
2179 *cmask = 0x00;
2180
2181 } else {
2182 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2183 USB_EP_ATTR_INTR) {
2184
2185 /* Allocate bandwidth for low speed interrupt */
2186 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2187 smask, cmask, pnode, sbandwidth, cbandwidth,
2188 interval);
2189 } else {
2190 if ((endpoint->bEndpointAddress &
2191 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2192
2193 /* Allocate bandwidth for sitd in */
2194 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2195 smask, cmask, pnode, sbandwidth, cbandwidth,
2196 interval);
2197 } else {
2198
2199 /* Allocate bandwidth for sitd out */
2200 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2201 smask, pnode, sbandwidth, interval);
2202 *cmask = 0x00;
2203 }
2204 }
2205 }
2206
2207 if (error != USB_SUCCESS) {
2208 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2209 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2210 "bandwidth value and cannot allocate bandwidth for a "
2211 "given high-speed periodic endpoint");
2212
2213 return (USB_NO_BANDWIDTH);
2214 }
2215
2216 return (error);
2217 }
2218
2219
2220 /*
 * ehci_allocate_classic_tt_bandwidth:
2222 *
2223 * Allocate classic TT bandwidth for the low/full speed interrupt and
2224 * isochronous endpoints.
2225 */
2226 static int
2227 ehci_allocate_classic_tt_bandwidth(
2228 ehci_state_t *ehcip,
2229 usba_pipe_handle_data_t *ph,
2230 uint_t pnode)
2231 {
2232 uint_t bandwidth, min;
2233 uint_t height, leftmost, list;
2234 usb_ep_descr_t *endpoint = &ph->p_ep;
2235 usba_device_t *child_ud, *parent_ud;
2236 usb_port_status_t port_status;
2237 int i, interval;
2238
2239 /* This routine is protected by the ehci_int_mutex */
2240 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2241
2242 /* Get child's usba device structure */
2243 child_ud = ph->p_usba_device;
2244
2245 mutex_enter(&child_ud->usb_mutex);
2246
2247 /* Get the current usb device's port status */
2248 port_status = child_ud->usb_port_status;
2249
2250 /* Get the parent high speed hub's usba device structure */
2251 parent_ud = child_ud->usb_hs_hub_usba_dev;
2252
2253 mutex_exit(&child_ud->usb_mutex);
2254
2255 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2256 "ehci_allocate_classic_tt_bandwidth: "
2257 "child_ud 0x%p parent_ud 0x%p",
2258 (void *)child_ud, (void *)parent_ud);
2259
2260 /*
2261 * Calculate the length in bytes of a transaction on this
2262 * periodic endpoint. Return failure if maximum packet is
2263 * zero.
2264 */
2265 if (ehci_compute_classic_bandwidth(endpoint,
2266 port_status, &bandwidth) != USB_SUCCESS) {
2267
2268 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2269 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2270 "with zero endpoint maximum packet size is not supported");
2271
2272 return (USB_NOT_SUPPORTED);
2273 }
2274
2275 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2276 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2277
2278 mutex_enter(&parent_ud->usb_mutex);
2279
2280 /*
2281 * If the length in bytes plus the allocated bandwidth exceeds
2282 * the maximum, return bandwidth allocation failure.
2283 */
2284 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2285 FS_PERIODIC_BANDWIDTH) {
2286
2287 mutex_exit(&parent_ud->usb_mutex);
2288
2289 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2290 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2291 "bandwidth value and cannot allocate bandwidth for a "
2292 "given low/full speed periodic endpoint");
2293
2294 return (USB_NO_BANDWIDTH);
2295 }
2296
2297 mutex_exit(&parent_ud->usb_mutex);
2298
2299 /* Adjust polling interval to be a power of 2 */
2300 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2301
2302 /* Find the height in the tree */
2303 height = ehci_lattice_height(interval);
2304
2305 /* Find the leftmost leaf in the subtree specified by the node. */
2306 leftmost = ehci_leftmost_leaf(pnode, height);
2307
2308 mutex_enter(&parent_ud->usb_mutex);
2309
2310 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2311 list = ehci_index[leftmost + i];
2312
2313 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2314 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2315
2316 mutex_exit(&parent_ud->usb_mutex);
2317
2318 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2319 "ehci_allocate_classic_tt_bandwidth: Reached "
2320 "maximum bandwidth value and cannot allocate "
2321 "bandwidth for low/full periodic endpoint");
2322
2323 return (USB_NO_BANDWIDTH);
2324 }
2325 }
2326
2327 /*
2328 * All the leaves for this node must be updated with the bandwidth.
2329 */
2330 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2331 list = ehci_index[leftmost + i];
2332 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2333 }
2334
2335 /* Find the leaf with the smallest allocated bandwidth */
2336 min = parent_ud->usb_hs_hub_bandwidth[0];
2337
2338 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2339 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2340 min = parent_ud->usb_hs_hub_bandwidth[i];
2341 }
2342 }
2343
2344 /* Save the minimum for later use */
2345 parent_ud->usb_hs_hub_min_bandwidth = min;
2346
2347 mutex_exit(&parent_ud->usb_mutex);
2348
2349 return (USB_SUCCESS);
2350 }
2351
2352
2353 /*
2354 * ehci_deallocate_bandwidth:
2355 *
2356 * Deallocate bandwidth for the given node in the lattice and the length
2357 * of transfer.
2358 */
2359 void
2360 ehci_deallocate_bandwidth(
2361 ehci_state_t *ehcip,
2362 usba_pipe_handle_data_t *ph,
2363 uint_t pnode,
2364 uchar_t smask,
2365 uchar_t cmask)
2366 {
2367 /* This routine is protected by the ehci_int_mutex */
2368 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2369
2370 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2371
2372 /*
2373 * For low/full speed usb devices, deallocate classic TT bandwidth
	 * in addition to high speed bandwidth.
2375 */
2376 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2377
2378 /* Deallocate classic TT bandwidth */
2379 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2380 }
2381 }
2382
2383
2384 /*
2385 * ehci_deallocate_high_speed_bandwidth:
2386 *
 * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2388 */
2389 static void
2390 ehci_deallocate_high_speed_bandwidth(
2391 ehci_state_t *ehcip,
2392 usba_pipe_handle_data_t *ph,
2393 uint_t pnode,
2394 uchar_t smask,
2395 uchar_t cmask)
2396 {
2397 uint_t height, leftmost;
2398 uint_t list_count;
2399 uint_t sbandwidth, cbandwidth;
2400 int interval;
2401 usb_ep_descr_t *endpoint = &ph->p_ep;
2402 usba_device_t *child_ud;
2403 usb_port_status_t port_status;
2404
2405 /* This routine is protected by the ehci_int_mutex */
2406 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2407
2408 /* Get child's usba device structure */
2409 child_ud = ph->p_usba_device;
2410
2411 mutex_enter(&child_ud->usb_mutex);
2412
2413 /* Get the current usb device's port status */
2414 port_status = ph->p_usba_device->usb_port_status;
2415
2416 mutex_exit(&child_ud->usb_mutex);
2417
2418 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2419 port_status, &sbandwidth, &cbandwidth);
2420
2421 /* Adjust polling interval to be a power of 2 */
2422 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2423
2424 /* Find the height in the tree */
2425 height = ehci_lattice_height(interval);
2426
2427 /*
2428 * Find the leftmost leaf in the subtree specified by the node
2429 */
2430 leftmost = ehci_leftmost_leaf(pnode, height);
2431
2432 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2433
2434 /* Delete the bandwidth from the appropriate lists */
2435 if (port_status == USBA_HIGH_SPEED_DEV) {
2436
2437 ehci_update_bw_availability(ehcip, -sbandwidth,
2438 leftmost, list_count, smask);
2439 } else {
2440 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2441 USB_EP_ATTR_INTR) {
2442
2443 ehci_update_bw_availability(ehcip, -sbandwidth,
2444 leftmost, list_count, smask);
2445 ehci_update_bw_availability(ehcip, -cbandwidth,
2446 leftmost, list_count, cmask);
2447 } else {
2448 if ((endpoint->bEndpointAddress &
2449 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2450
2451 ehci_update_bw_availability(ehcip, -sbandwidth,
2452 leftmost, list_count, smask);
2453 ehci_update_bw_availability(ehcip,
2454 -MAX_UFRAME_SITD_XFER, leftmost,
2455 list_count, cmask);
2456 } else {
2457
2458 ehci_update_bw_availability(ehcip,
2459 -MAX_UFRAME_SITD_XFER, leftmost,
2460 list_count, smask);
2461 }
2462 }
2463 }
2464 }
2465
2466 /*
2467 * ehci_deallocate_classic_tt_bandwidth:
2468 *
 * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2470 */
2471 static void
2472 ehci_deallocate_classic_tt_bandwidth(
2473 ehci_state_t *ehcip,
2474 usba_pipe_handle_data_t *ph,
2475 uint_t pnode)
2476 {
2477 uint_t bandwidth, height, leftmost, list, min;
2478 int i, interval;
2479 usb_ep_descr_t *endpoint = &ph->p_ep;
2480 usba_device_t *child_ud, *parent_ud;
2481 usb_port_status_t port_status;
2482
2483 /* This routine is protected by the ehci_int_mutex */
2484 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2485
2486 /* Get child's usba device structure */
2487 child_ud = ph->p_usba_device;
2488
2489 mutex_enter(&child_ud->usb_mutex);
2490
2491 /* Get the current usb device's port status */
2492 port_status = child_ud->usb_port_status;
2493
2494 /* Get the parent high speed hub's usba device structure */
2495 parent_ud = child_ud->usb_hs_hub_usba_dev;
2496
2497 mutex_exit(&child_ud->usb_mutex);
2498
2499 /* Obtain the bandwidth */
2500 (void) ehci_compute_classic_bandwidth(endpoint,
2501 port_status, &bandwidth);
2502
2503 /* Adjust polling interval to be a power of 2 */
2504 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2505
2506 /* Find the height in the tree */
2507 height = ehci_lattice_height(interval);
2508
2509 /* Find the leftmost leaf in the subtree specified by the node */
2510 leftmost = ehci_leftmost_leaf(pnode, height);
2511
2512 mutex_enter(&parent_ud->usb_mutex);
2513
2514 /* Delete the bandwidth from the appropriate lists */
2515 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2516 list = ehci_index[leftmost + i];
2517 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2518 }
2519
2520 /* Find the leaf with the smallest allocated bandwidth */
2521 min = parent_ud->usb_hs_hub_bandwidth[0];
2522
2523 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2524 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2525 min = parent_ud->usb_hs_hub_bandwidth[i];
2526 }
2527 }
2528
2529 /* Save the minimum for later use */
2530 parent_ud->usb_hs_hub_min_bandwidth = min;
2531
2532 mutex_exit(&parent_ud->usb_mutex);
2533 }
2534
2535
2536 /*
2537 * ehci_compute_high_speed_bandwidth:
2538 *
2539 * Given a periodic endpoint (interrupt or isochronous) determine the total
2540 * bandwidth for one transaction. The EHCI host controller traverses the
2541 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2542 * services an endpoint, only a single transaction attempt is made. The HC
2543 * moves to the next Endpoint Descriptor after the first transaction attempt
2544 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2545 * Transfer Descriptor is inserted into the lattice, we will only count the
2546 * number of bytes for one transaction.
2547 *
 * The following formulas are used for calculating bandwidth, in terms of
 * bytes, for a single USB high speed transaction. The protocol overheads
 * differ for each type of USB transfer, and all these formulas and
 * protocol overheads are derived from section 5.11.3 of the
 * USB 2.0 Specification.
2553 *
2554 * High-Speed:
2555 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2556 *
 * Split Transaction: (low/full speed devices connected behind a USB 2.0 hub)
2558 *
2559 * Protocol overhead + Split transaction overhead +
2560 * ((MaxPktSz * 7)/6) + Host_Delay;
2561 */
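/*
 * Worked example (illustrative only; the overhead constants are symbolic
 * and come from the driver's header): a high speed interrupt endpoint with
 * wMaxPacketSize = 64 and one transaction per micro-frame costs
 *
 *	EHCI_HOST_CONTROLLER_DELAY + HS_NON_ISOC_PROTO_OVERHEAD + 74
 *
 * bytes per micro-frame, where 74 = (64 * 7) / 6 is the bit-stuffed
 * payload. For a low/full speed device behind a USB 2.0 hub, the start
 * and complete split token overheads are added on top of this.
 */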
2562 /* ARGSUSED */
2563 static int
2564 ehci_compute_high_speed_bandwidth(
2565 ehci_state_t *ehcip,
2566 usb_ep_descr_t *endpoint,
2567 usb_port_status_t port_status,
2568 uint_t *sbandwidth,
2569 uint_t *cbandwidth)
2570 {
2571 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2572
2573 /* Return failure if endpoint maximum packet is zero */
2574 if (maxpacketsize == 0) {
2575 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2576 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2577 "with zero endpoint maximum packet size is not supported");
2578
2579 return (USB_NOT_SUPPORTED);
2580 }
2581
2582 /* Add bit-stuffing overhead */
2583 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2584
2585 /* Add Host Controller specific delay to required bandwidth */
2586 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2587
2588 /* Add xfer specific protocol overheads */
2589 if ((endpoint->bmAttributes &
2590 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2591 /* High speed interrupt transaction */
2592 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2593 } else {
2594 /* Isochronous transaction */
2595 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2596 }
2597
2598 /*
2599 * For low/full speed devices, add split transaction specific
2600 * overheads.
2601 */
2602 if (port_status != USBA_HIGH_SPEED_DEV) {
2603 /*
2604 * Add start and complete split transaction
2605 * tokens overheads.
2606 */
2607 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2608 *sbandwidth += START_SPLIT_OVERHEAD;
2609
2610 /* Add data overhead depending on data direction */
2611 if ((endpoint->bEndpointAddress &
2612 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2613 *cbandwidth += maxpacketsize;
2614 } else {
2615 if ((endpoint->bmAttributes &
2616 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
				/* No complete splits for isochronous OUT */
2618 *cbandwidth = 0;
2619 }
2620 *sbandwidth += maxpacketsize;
2621 }
2622 } else {
2623 uint_t xactions;
2624
2625 /* Get the max transactions per microframe */
2626 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2627 USB_EP_MAX_XACTS_SHIFT) + 1;
2628
2629 /* High speed transaction */
2630 *sbandwidth += maxpacketsize;
2631
2632 /* Calculate bandwidth per micro-frame */
2633 *sbandwidth *= xactions;
2634
2635 *cbandwidth = 0;
2636 }
2637
2638 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2639 "ehci_allocate_high_speed_bandwidth: "
2640 "Start split bandwidth %d Complete split bandwidth %d",
2641 *sbandwidth, *cbandwidth);
2642
2643 return (USB_SUCCESS);
2644 }
2645
2646
2647 /*
2648 * ehci_compute_classic_bandwidth:
2649 *
2650 * Given a periodic endpoint (interrupt or isochronous) determine the total
2651 * bandwidth for one transaction. The EHCI host controller traverses the
2652 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2653 * services an endpoint, only a single transaction attempt is made. The HC
2654 * moves to the next Endpoint Descriptor after the first transaction attempt
2655 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2656 * Transfer Descriptor is inserted into the lattice, we will only count the
2657 * number of bytes for one transaction.
2658 *
 * The following formulas are used for calculating bandwidth, in terms of
 * bytes, for a single low/full speed (classic) transaction. The protocol
 * overheads differ for each type of USB transfer, and all these formulas
 * and protocol overheads are derived from section 5.11.3 of the
 * USB 2.0 Specification.
2664 *
2665 * Low-Speed:
2666 * Protocol overhead + Hub LS overhead +
2667 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2668 *
2669 * Full-Speed:
2670 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2671 */
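/*
 * Worked example (illustrative only; the overhead constants are symbolic
 * and come from the driver's header): a full speed interrupt endpoint
 * with wMaxPacketSize = 8 costs
 *
 *	TT_DELAY + FS_NON_ISOC_PROTO_OVERHEAD + 9
 *
 * bytes, where 9 = (8 * 7) / 6 is the bit-stuffed payload. For a low
 * speed endpoint, the payload is additionally scaled by LOW_SPEED_CLOCK
 * and the hub low speed protocol overhead is added.
 */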
2672 /* ARGSUSED */
2673 static int
2674 ehci_compute_classic_bandwidth(
2675 usb_ep_descr_t *endpoint,
2676 usb_port_status_t port_status,
2677 uint_t *bandwidth)
2678 {
2679 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2680
2681 /*
2682 * If endpoint maximum packet is zero, then return immediately.
2683 */
2684 if (maxpacketsize == 0) {
2685
2686 return (USB_NOT_SUPPORTED);
2687 }
2688
2689 /* Add TT delay to required bandwidth */
2690 *bandwidth = TT_DELAY;
2691
2692 /* Add bit-stuffing overhead */
2693 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2694
2695 switch (port_status) {
2696 case USBA_LOW_SPEED_DEV:
2697 /* Low speed interrupt transaction */
2698 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2699 HUB_LOW_SPEED_PROTO_OVERHEAD +
2700 (LOW_SPEED_CLOCK * maxpacketsize));
2701 break;
2702 case USBA_FULL_SPEED_DEV:
2703 /* Full speed transaction */
2704 *bandwidth += maxpacketsize;
2705
2706 /* Add xfer specific protocol overheads */
2707 if ((endpoint->bmAttributes &
2708 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2709 /* Full speed interrupt transaction */
2710 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2711 } else {
2712 /* Isochronous and input transaction */
2713 if ((endpoint->bEndpointAddress &
2714 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2715 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2716 } else {
2717 /* Isochronous and output transaction */
2718 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2719 }
2720 }
2721 break;
2722 }
2723
2724 return (USB_SUCCESS);
2725 }
2726
2727
2728 /*
2729 * ehci_adjust_polling_interval:
2730 *
 * Adjust the polling interval according to the usb device speed.
2732 */
2733 /* ARGSUSED */
2734 int
2735 ehci_adjust_polling_interval(
2736 ehci_state_t *ehcip,
2737 usb_ep_descr_t *endpoint,
2738 usb_port_status_t port_status)
2739 {
2740 uint_t interval;
2741 int i = 0;
2742
2743 /* Get the polling interval */
2744 interval = endpoint->bInterval;
2745
2746 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2747 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2748
2749 /*
	 * According to the USB 2.0 Specification, a high-speed
	 * endpoint's polling interval is specified in terms of
	 * 125us micro-frames, whereas full/low speed endpoints'
	 * polling intervals are specified in milliseconds.
	 *
	 * A high speed interrupt/isochronous endpoint can specify
	 * a desired polling interval between 1 and 16 micro-frames,
	 * whereas full/low speed endpoints can specify between
	 * 1 and 255 milliseconds.
2759 */
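	/*
	 * For example (illustrative values): a high speed endpoint with
	 * bInterval = 4 is polled every 2^(4 - 1) = 8 micro-frames, i.e.
	 * every 1ms, while a full speed endpoint with bInterval = 10 is
	 * polled every 10ms and is rounded down to 8ms by the power-of-2
	 * adjustment at the end of this routine.
	 */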
2760 switch (port_status) {
2761 case USBA_LOW_SPEED_DEV:
2762 /*
2763 * Low speed endpoints are limited to specifying
2764 * only 8ms to 255ms in this driver. If a device
2765 * reports a polling interval that is less than 8ms,
2766 * it will use 8 ms instead.
2767 */
2768 if (interval < LS_MIN_POLL_INTERVAL) {
2769
2770 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2771 "Low speed endpoint's poll interval of %d ms "
2772 "is below threshold. Rounding up to %d ms",
2773 interval, LS_MIN_POLL_INTERVAL);
2774
2775 interval = LS_MIN_POLL_INTERVAL;
2776 }
2777
2778 /*
2779 * Return an error if the polling interval is greater
2780 * than 255ms.
2781 */
2782 if (interval > LS_MAX_POLL_INTERVAL) {
2783
2784 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2785 "Low speed endpoint's poll interval is "
2786 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2787
2788 return (USB_FAILURE);
2789 }
2790 break;
2791
2792 case USBA_FULL_SPEED_DEV:
2793 /*
		 * Return an error if the polling interval is less
		 * than 1ms or greater than 255ms.
		 */
		if ((interval < FS_MIN_POLL_INTERVAL) ||
		    (interval > FS_MAX_POLL_INTERVAL)) {
2799
2800 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2801 "Full speed endpoint's poll interval must "
2802 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2803 FS_MAX_POLL_INTERVAL);
2804
2805 return (USB_FAILURE);
2806 }
2807 break;
2808 case USBA_HIGH_SPEED_DEV:
2809 /*
		 * Return an error if the polling interval is less than
		 * 1 or greater than 16. This value is converted to 125us
		 * units using 2^(bInterval - 1); refer to the USB 2.0
		 * spec, page 51, for details.
		 */
		if ((interval < HS_MIN_POLL_INTERVAL) ||
		    (interval > HS_MAX_POLL_INTERVAL)) {
2817
2818 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2819 "High speed endpoint's poll interval "
2820 "must be between %d and %d units",
2821 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2822
2823 return (USB_FAILURE);
2824 }
2825
2826 /* Adjust high speed device polling interval */
2827 interval =
2828 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2829
2830 break;
2831 }
2832
2833 /*
	 * If the polling interval is greater than 32ms,
	 * clamp it to 32ms.
2836 */
2837 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2838 interval = EHCI_NUM_INTR_QH_LISTS;
2839 }
2840
2841 /*
	 * Find the largest power of 2 that is less than
	 * or equal to the interval.
2844 */
2845 while ((ehci_pow_2(i)) <= interval) {
2846 i++;
2847 }
2848
2849 return (ehci_pow_2((i - 1)));
2850 }
2851
2852
2853 /*
2854 * ehci_adjust_high_speed_polling_interval:
2855 */
2856 /* ARGSUSED */
2857 static int
2858 ehci_adjust_high_speed_polling_interval(
2859 ehci_state_t *ehcip,
2860 usb_ep_descr_t *endpoint)
2861 {
2862 uint_t interval;
2863
2864 /* Get the polling interval */
2865 interval = ehci_pow_2(endpoint->bInterval - 1);
2866
2867 /*
	 * Convert the polling interval from micro-frames
	 * (125us units) to milliseconds.
2870 */
2871 if (interval <= EHCI_MAX_UFRAMES) {
2872 interval = 1;
2873 } else {
2874 interval = interval/EHCI_MAX_UFRAMES;
2875 }
2876
2877 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2878 "ehci_adjust_high_speed_polling_interval: "
2879 "High speed adjusted interval 0x%x", interval);
2880
2881 return (interval);
2882 }
2883
2884
2885 /*
2886 * ehci_lattice_height:
2887 *
 * Given the requested polling interval, find the height in the tree at
 * which the nodes for this interval fall. The height is measured as the
 * number of nodes from the leaf to the level specified by the interval.
 * The root of the tree is at height TREE_HEIGHT.
2892 */
2893 static uint_t
2894 ehci_lattice_height(uint_t interval)
2895 {
2896 return (TREE_HEIGHT - (ehci_log_2(interval)));
2897 }
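
/*
 * For example (assuming TREE_HEIGHT is 5 for the 32-list lattice): a 1ms
 * interval maps to height 5 (the root), an 8ms interval maps to height
 * 5 - 3 = 2, and a 32ms interval maps to height 0 (a leaf).
 */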
2898
2899
2900 /*
2901 * ehci_lattice_parent:
2902 *
2903 * Given a node in the lattice, find the index of the parent node
2904 */
2905 static uint_t
2906 ehci_lattice_parent(uint_t node)
2907 {
2908 if ((node % 2) == 0) {
2909
2910 return ((node/2) - 1);
2911 } else {
2912
2913 return ((node + 1)/2 - 1);
2914 }
2915 }
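
/*
 * For example: in this zero-based lattice the children of node n are
 * nodes 2n + 1 and 2n + 2, so both ehci_lattice_parent(5) and
 * ehci_lattice_parent(6) return node 2.
 */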
2916
2917
2918 /*
2919 * ehci_find_periodic_node:
2920 *
2921 * Based on the "real" array leaf node and interval, get the periodic node.
2922 */
2923 static uint_t
2924 ehci_find_periodic_node(uint_t leaf, int interval)
2925 {
2926 uint_t lattice_leaf;
2927 uint_t height = ehci_lattice_height(interval);
2928 uint_t pnode;
2929 int i;
2930
2931 /* Get the leaf number in the lattice */
2932 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
2933
2934 /* Get the node in the lattice based on the height and leaf */
2935 pnode = lattice_leaf;
2936 for (i = 0; i < height; i++) {
2937 pnode = ehci_lattice_parent(pnode);
2938 }
2939
2940 return (pnode);
2941 }
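
/*
 * For example (32-list lattice): for array leaf 0 and an 8ms interval,
 * the lattice leaf is 0 + 32 - 1 = 31 and the height is 2, so walking
 * up gives parent 15 and then node 7, which is the periodic node used.
 */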
2942
2943
2944 /*
2945 * ehci_leftmost_leaf:
2946 *
2947 * Find the leftmost leaf in the subtree specified by the node. Height refers
2948 * to number of nodes from the bottom of the tree to the node, including the
2949 * node.
2950 *
2951 * The formula for a zero based tree is:
2952 * 2^H * Node + 2^H - 1
2953 * The leaf of the tree is an array, convert the number for the array.
2954 * Subtract the size of nodes not in the array
2955 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
2956 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
2957 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
2958 * 0
2959 * 1 2
2960 * 0 1 2 3
2961 */
2962 static uint_t
2963 ehci_leftmost_leaf(
2964 uint_t node,
2965 uint_t height)
2966 {
2967 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
2968 }
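
/*
 * For example (32-list lattice): the root (node 0) at height 5 gives
 * 2^5 * 1 - 32 = 0, and node 8 at height 2 gives 2^2 * 9 - 32 = 4,
 * i.e. its subtree covers array leaves 4 through 7.
 */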
2969
2970
2971 /*
2972 * ehci_pow_2:
2973 *
2974 * Compute 2 to the power
2975 */
2976 static uint_t
2977 ehci_pow_2(uint_t x)
2978 {
2979 if (x == 0) {
2980
2981 return (1);
2982 } else {
2983
2984 return (2 << (x - 1));
2985 }
2986 }
2987
2988
2989 /*
2990 * ehci_log_2:
2991 *
2992 * Compute log base 2 of x
2993 */
2994 static uint_t
2995 ehci_log_2(uint_t x)
2996 {
2997 int i = 0;
2998
2999 while (x != 1) {
3000 x = x >> 1;
3001 i++;
3002 }
3003
3004 return (i);
3005 }
3006
3007
3008 /*
3009 * ehci_find_bestfit_hs_mask:
3010 *
3011 * Find the smask and cmask in the bandwidth allocation, and update the
3012 * bandwidth allocation.
3013 */
3014 static int
3015 ehci_find_bestfit_hs_mask(
3016 ehci_state_t *ehcip,
3017 uchar_t *smask,
3018 uint_t *pnode,
3019 usb_ep_descr_t *endpoint,
3020 uint_t bandwidth,
3021 int interval)
3022 {
3023 int i;
3024 uint_t elements, index;
3025 int array_leaf, best_array_leaf;
3026 uint_t node_bandwidth, best_node_bandwidth;
3027 uint_t leaf_count;
3028 uchar_t bw_mask;
3029 uchar_t best_smask;
3030
3031 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3032 "ehci_find_bestfit_hs_mask: ");
3033
3034 /* Get all the valid smasks */
3035 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3036 case EHCI_INTR_1US_POLL:
3037 index = EHCI_1US_MASK_INDEX;
3038 elements = EHCI_INTR_1US_POLL;
3039 break;
3040 case EHCI_INTR_2US_POLL:
3041 index = EHCI_2US_MASK_INDEX;
3042 elements = EHCI_INTR_2US_POLL;
3043 break;
3044 case EHCI_INTR_4US_POLL:
3045 index = EHCI_4US_MASK_INDEX;
3046 elements = EHCI_INTR_4US_POLL;
3047 break;
3048 case EHCI_INTR_XUS_POLL:
3049 default:
3050 index = EHCI_XUS_MASK_INDEX;
3051 elements = EHCI_INTR_XUS_POLL;
3052 break;
3053 }
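
	/*
	 * For example: a high speed endpoint with bInterval = 3 polls
	 * every 2^(3 - 1) = 4 micro-frames, so the 4 uFrame group of
	 * start split masks (EHCI_4US_MASK_INDEX, EHCI_INTR_4US_POLL
	 * elements) is searched below.
	 */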
3054
3055 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3056
3057 /*
	 * Because of the way the leaves are set up, we will automatically
3059 * hit the leftmost leaf of every possible node with this interval.
3060 */
3061 best_smask = 0x00;
3062 best_node_bandwidth = 0;
3063 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3064 /* Find the bandwidth mask */
3065 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3066 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3067
3068 /*
3069 * If this node cannot support our requirements skip to the
3070 * next leaf.
3071 */
3072 if (bw_mask == 0x00) {
3073 continue;
3074 }
3075
3076 /*
3077 * Now make sure our bandwidth requirements can be
3078 * satisfied with one of smasks in this node.
3079 */
3080 *smask = 0x00;
3081 for (i = index; i < (index + elements); i++) {
3082 /* Check the start split mask value */
			if (ehci_start_split_mask[i] & bw_mask) {
				*smask = ehci_start_split_mask[i];
3085 break;
3086 }
3087 }
3088
3089 /*
		 * If an appropriate smask is found, save the information if:
3091 * o best_smask has not been found yet.
3092 * - or -
3093 * o This is the node with the least amount of bandwidth
3094 */
3095 if ((*smask != 0x00) &&
3096 ((best_smask == 0x00) ||
3097 (best_node_bandwidth > node_bandwidth))) {
3098
3099 best_node_bandwidth = node_bandwidth;
3100 best_array_leaf = array_leaf;
3101 best_smask = *smask;
3102 }
3103 }
3104
3105 /*
	 * If we find a node that can handle the bandwidth, populate the
	 * appropriate variables and return success.
3108 */
3109 if (best_smask) {
3110 *smask = best_smask;
3111 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3112 interval);
3113 ehci_update_bw_availability(ehcip, bandwidth,
3114 ehci_index[best_array_leaf], leaf_count, best_smask);
3115
3116 return (USB_SUCCESS);
3117 }
3118
3119 return (USB_FAILURE);
3120 }
3121
3122
3123 /*
3124 * ehci_find_bestfit_ls_intr_mask:
3125 *
3126 * Find the smask and cmask in the bandwidth allocation.
3127 */
3128 static int
3129 ehci_find_bestfit_ls_intr_mask(
3130 ehci_state_t *ehcip,
3131 uchar_t *smask,
3132 uchar_t *cmask,
3133 uint_t *pnode,
3134 uint_t sbandwidth,
3135 uint_t cbandwidth,
3136 int interval)
3137 {
3138 int i;
3139 uint_t elements, index;
3140 int array_leaf, best_array_leaf;
3141 uint_t node_sbandwidth, node_cbandwidth;
3142 uint_t best_node_bandwidth;
3143 uint_t leaf_count;
3144 uchar_t bw_smask, bw_cmask;
3145 uchar_t best_smask, best_cmask;
3146
3147 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3148 "ehci_find_bestfit_ls_intr_mask: ");
3149
3150 /* For low and full speed devices */
3151 index = EHCI_XUS_MASK_INDEX;
3152 elements = EHCI_INTR_4MS_POLL;
3153
3154 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3155
3156 /*
	 * Because of the way the leaves are set up, we will automatically
3158 * hit the leftmost leaf of every possible node with this interval.
3159 */
3160 best_smask = 0x00;
3161 best_node_bandwidth = 0;
3162 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3163 /* Find the bandwidth mask */
3164 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3165 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3166 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3167 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3168
3169 /*
3170 * If this node cannot support our requirements skip to the
3171 * next leaf.
3172 */
3173 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3174 continue;
3175 }
3176
3177 /*
3178 * Now make sure our bandwidth requirements can be
3179 * satisfied with one of smasks in this node.
3180 */
3181 *smask = 0x00;
3182 *cmask = 0x00;
3183 for (i = index; i < (index + elements); i++) {
3184 /* Check the start split mask value */
			if ((ehci_start_split_mask[i] & bw_smask) &&
			    (ehci_intr_complete_split_mask[i] & bw_cmask)) {
				*smask = ehci_start_split_mask[i];
				*cmask = ehci_intr_complete_split_mask[i];
3189 break;
3190 }
3191 }
3192
3193 /*
		 * If an appropriate smask is found, save the information if:
3195 * o best_smask has not been found yet.
3196 * - or -
3197 * o This is the node with the least amount of bandwidth
3198 */
3199 if ((*smask != 0x00) &&
3200 ((best_smask == 0x00) ||
3201 (best_node_bandwidth >
3202 (node_sbandwidth + node_cbandwidth)))) {
3203 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3204 best_array_leaf = array_leaf;
3205 best_smask = *smask;
3206 best_cmask = *cmask;
3207 }
3208 }
3209
3210 /*
	 * If we find a node that can handle the bandwidth, populate the
	 * appropriate variables and return success.
3213 */
3214 if (best_smask) {
3215 *smask = best_smask;
3216 *cmask = best_cmask;
3217 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3218 interval);
3219 ehci_update_bw_availability(ehcip, sbandwidth,
3220 ehci_index[best_array_leaf], leaf_count, best_smask);
3221 ehci_update_bw_availability(ehcip, cbandwidth,
3222 ehci_index[best_array_leaf], leaf_count, best_cmask);
3223
3224 return (USB_SUCCESS);
3225 }
3226
3227 return (USB_FAILURE);
3228 }
3229
3230
3231 /*
3232 * ehci_find_bestfit_sitd_in_mask:
3233 *
3234 * Find the smask and cmask in the bandwidth allocation.
3235 */
3236 static int
3237 ehci_find_bestfit_sitd_in_mask(
3238 ehci_state_t *ehcip,
3239 uchar_t *smask,
3240 uchar_t *cmask,
3241 uint_t *pnode,
3242 uint_t sbandwidth,
3243 uint_t cbandwidth,
3244 int interval)
3245 {
3246 int i, uFrames, found;
3247 int array_leaf, best_array_leaf;
3248 uint_t node_sbandwidth, node_cbandwidth;
3249 uint_t best_node_bandwidth;
3250 uint_t leaf_count;
3251 uchar_t bw_smask, bw_cmask;
3252 uchar_t best_smask, best_cmask;
3253
3254 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3255 "ehci_find_bestfit_sitd_in_mask: ");
3256
3257 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3258
3259 /*
	 * Because of the way the leaves are set up, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3263 */
3264 /*
	 * We need to add an additional 2 uFrames if the "L"ast
	 * complete split is before uFrame 6. See section
	 * 11.8.4 in the USB 2.0 Spec. Currently we do not support
	 * the "Back Ptr", which means we support an IN of
	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3270 */
3271 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3272 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3273 uFrames++;
3274 }
3275 if (uFrames > 6) {
3276
3277 return (USB_FAILURE);
3278 }
3279 *smask = 0x1;
3280 *cmask = 0x00;
3281 for (i = 0; i < uFrames; i++) {
3282 *cmask = *cmask << 1;
3283 *cmask |= 0x1;
3284 }
3285 /* cmask must start 2 frames after the smask */
3286 *cmask = *cmask << 2;
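	/*
	 * For example: if cbandwidth fits within a single uFrame, uFrames
	 * is 0 + 2 + 1 = 3, so smask = 0x01 and cmask = 0x07 << 2 = 0x1c,
	 * i.e. a start split in uFrame 0 and complete splits in uFrames
	 * 2, 3 and 4, before the masks are shifted to a free slot below.
	 */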
3287
3288 found = 0;
3289 best_smask = 0x00;
3290 best_node_bandwidth = 0;
3291 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3292 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3293 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3294 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3295 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3296 &bw_cmask);
3297
3298 /*
3299 * If this node cannot support our requirements skip to the
3300 * next leaf.
3301 */
3302 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3303 continue;
3304 }
3305
3306 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3307 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3308 found = 1;
3309 break;
3310 }
3311 *smask = *smask << 1;
3312 *cmask = *cmask << 1;
3313 }
3314
3315 /*
		 * If an appropriate smask is found, save the information if:
3317 * o best_smask has not been found yet.
3318 * - or -
3319 * o This is the node with the least amount of bandwidth
3320 */
3321 if (found &&
3322 ((best_smask == 0x00) ||
3323 (best_node_bandwidth >
3324 (node_sbandwidth + node_cbandwidth)))) {
3325 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3326 best_array_leaf = array_leaf;
3327 best_smask = *smask;
3328 best_cmask = *cmask;
3329 }
3330 }
3331
3332 /*
	 * If we find a node that can handle the bandwidth, populate the
	 * appropriate variables and return success.
3335 */
3336 if (best_smask) {
3337 *smask = best_smask;
3338 *cmask = best_cmask;
3339 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3340 interval);
3341 ehci_update_bw_availability(ehcip, sbandwidth,
3342 ehci_index[best_array_leaf], leaf_count, best_smask);
3343 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3344 ehci_index[best_array_leaf], leaf_count, best_cmask);
3345
3346 return (USB_SUCCESS);
3347 }
3348
3349 return (USB_FAILURE);
3350 }
3351
3352
3353 /*
3354 * ehci_find_bestfit_sitd_out_mask:
3355 *
3356 * Find the smask in the bandwidth allocation.
3357 */
3358 static int
3359 ehci_find_bestfit_sitd_out_mask(
3360 ehci_state_t *ehcip,
3361 uchar_t *smask,
3362 uint_t *pnode,
3363 uint_t sbandwidth,
3364 int interval)
3365 {
3366 int i, uFrames, found;
3367 int array_leaf, best_array_leaf;
3368 uint_t node_sbandwidth;
3369 uint_t best_node_bandwidth;
3370 uint_t leaf_count;
3371 uchar_t bw_smask;
3372 uchar_t best_smask;
3373
3374 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3375 "ehci_find_bestfit_sitd_out_mask: ");
3376
3377 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3378
3379 /*
	 * Because of the way the leaves are set up, we will automatically
	 * hit the leftmost leaf of every possible node with this interval.
	 * You may only send MAX_UFRAME_SITD_XFER raw bytes per uFrame.
3383 */
3384 *smask = 0x00;
3385 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3386 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3387 uFrames++;
3388 }
3389 for (i = 0; i < uFrames; i++) {
3390 *smask = *smask << 1;
3391 *smask |= 0x1;
3392 }
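
	/*
	 * For example: an OUT sbandwidth of 2 * MAX_UFRAME_SITD_XFER + 1
	 * bytes needs uFrames = 3, giving an initial smask of 0x07, i.e.
	 * start splits in uFrames 0, 1 and 2, before the mask is shifted
	 * to a free position below.
	 */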
3393
3394 found = 0;
3395 best_smask = 0x00;
3396 best_node_bandwidth = 0;
3397 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3398 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3399 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3400 &bw_smask);
3401
3402 /*
3403 * If this node cannot support our requirements skip to the
3404 * next leaf.
3405 */
3406 if (bw_smask == 0x00) {
3407 continue;
3408 }
3409
3410 /* You cannot have a start split on the 8th uFrame */
3411 for (i = 0; (*smask & 0x80) == 0; i++) {
3412 if (*smask & bw_smask) {
3413 found = 1;
3414 break;
3415 }
3416 *smask = *smask << 1;
3417 }
3418
3419 /*
		 * If an appropriate smask is found, save the information if:
3421 * o best_smask has not been found yet.
3422 * - or -
3423 * o This is the node with the least amount of bandwidth
3424 */
3425 if (found &&
3426 ((best_smask == 0x00) ||
3427 (best_node_bandwidth > node_sbandwidth))) {
3428 best_node_bandwidth = node_sbandwidth;
3429 best_array_leaf = array_leaf;
3430 best_smask = *smask;
3431 }
3432 }
3433
3434 /*
	 * If we find a node that can handle the bandwidth, populate the
	 * appropriate variables and return success.
3437 */
3438 if (best_smask) {
3439 *smask = best_smask;
3440 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3441 interval);
3442 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3443 ehci_index[best_array_leaf], leaf_count, best_smask);
3444
3445 return (USB_SUCCESS);
3446 }
3447
3448 return (USB_FAILURE);
3449 }
3450
3451
3452 /*
3453 * ehci_calculate_bw_availability_mask:
3454 *
3455 * Returns the "total bandwidth used" in this node.
3456 * Populates bw_mask with the uFrames that can support the bandwidth.
3457 *
 * If none of the uFrames can support this bandwidth, then bw_mask
 * will return 0x00 and the "total bandwidth used" will be invalid.
3460 */
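/*
 * For example: a returned bw_mask of 0x0f means that only uFrames 0
 * through 3 of the frames covered by this node can absorb the extra
 * bandwidth without exceeding HS_PERIODIC_BANDWIDTH, while a mask of
 * 0x00 means no uFrame can.
 */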
3461 static uint_t
3462 ehci_calculate_bw_availability_mask(
3463 ehci_state_t *ehcip,
3464 uint_t bandwidth,
3465 int leaf,
3466 int leaf_count,
3467 uchar_t *bw_mask)
3468 {
3469 int i, j;
3470 uchar_t bw_uframe;
3471 int uframe_total;
3472 ehci_frame_bandwidth_t *fbp;
3473 uint_t total_bandwidth = 0;
3474
3475 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3476 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3477 leaf, leaf_count);
3478
3479 /* Start by saying all uFrames are available */
3480 *bw_mask = 0xFF;
3481
	for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3483 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3484
3485 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3486
3487 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3488 /*
3489 * If the uFrame in bw_mask is available check to see if
3490 * it can support the additional bandwidth.
3491 */
3492 bw_uframe = (*bw_mask & (0x1 << j));
3493 uframe_total =
3494 fbp->ehci_micro_frame_bandwidth[j] +
3495 bandwidth;
3496 if ((bw_uframe) &&
3497 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3498 *bw_mask = *bw_mask & ~bw_uframe;
3499 }
3500 }
3501 }
3502
3503 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3504 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3505 *bw_mask);
3506
3507 return (total_bandwidth);
3508 }
3509
3510
3511 /*
3512 * ehci_update_bw_availability:
3513 *
3514 * The leftmost leaf needs to be in terms of array position and
3515 * not the actual lattice position.
3516 */
3517 static void
3518 ehci_update_bw_availability(
3519 ehci_state_t *ehcip,
3520 int bandwidth,
3521 int leftmost_leaf,
3522 int leaf_count,
3523 uchar_t mask)
3524 {
3525 int i, j;
3526 ehci_frame_bandwidth_t *fbp;
3527 int uFrame_bandwidth[8];
3528
3529 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3530 "ehci_update_bw_availability: "
3531 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3532 leftmost_leaf, leaf_count, bandwidth, mask);
3533
3534 ASSERT(leftmost_leaf < 32);
3535 ASSERT(leftmost_leaf >= 0);
3536
3537 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3538 if (mask & 0x1) {
3539 uFrame_bandwidth[j] = bandwidth;
3540 } else {
3541 uFrame_bandwidth[j] = 0;
3542 }
3543
3544 mask = mask >> 1;
3545 }
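
	/*
	 * For example: a mask of 0x05 with a bandwidth of 100 decodes to
	 * uFrame_bandwidth[] = { 100, 0, 100, 0, 0, 0, 0, 0 }, so only
	 * uFrames 0 and 2 of each affected frame are charged below.
	 */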
3546
	/* Update all the affected leaves with the bandwidth */
3548 for (i = 0; i < leaf_count; i++) {
3549 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3550
3551 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3552 fbp->ehci_micro_frame_bandwidth[j] +=
3553 uFrame_bandwidth[j];
3554 fbp->ehci_allocated_frame_bandwidth +=
3555 uFrame_bandwidth[j];
3556 }
3557 }
3558 }
3559
3560 /*
3561 * Miscellaneous functions
3562 */
3563
3564 /*
3565 * ehci_obtain_state:
3566 *
3567 * NOTE: This function is also called from POLLED MODE.
3568 */
3569 ehci_state_t *
3570 ehci_obtain_state(dev_info_t *dip)
3571 {
3572 int instance = ddi_get_instance(dip);
3573
3574 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3575
3576 ASSERT(state != NULL);
3577
3578 return (state);
3579 }
3580
3581
3582 /*
3583 * ehci_state_is_operational:
3584 *
3585 * Check the Host controller state and return proper values.
3586 */
3587 int
3588 ehci_state_is_operational(ehci_state_t *ehcip)
3589 {
3590 int val;
3591
3592 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3593
3594 switch (ehcip->ehci_hc_soft_state) {
3595 case EHCI_CTLR_INIT_STATE:
3596 case EHCI_CTLR_SUSPEND_STATE:
3597 val = USB_FAILURE;
3598 break;
3599 case EHCI_CTLR_OPERATIONAL_STATE:
3600 val = USB_SUCCESS;
3601 break;
3602 case EHCI_CTLR_ERROR_STATE:
3603 val = USB_HC_HARDWARE_ERROR;
3604 break;
3605 default:
3606 val = USB_FAILURE;
3607 break;
3608 }
3609
3610 return (val);
3611 }
3612
3613
3614 /*
3615 * ehci_do_soft_reset
3616 *
3617 * Do soft reset of ehci host controller.
3618 */
3619 int
3620 ehci_do_soft_reset(ehci_state_t *ehcip)
3621 {
3622 usb_frame_number_t before_frame_number, after_frame_number;
3623 ehci_regs_t *ehci_save_regs;
3624
3625 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3626
3627 /* Increment host controller error count */
3628 ehcip->ehci_hc_error++;
3629
3630 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3631 "ehci_do_soft_reset:"
3632 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3633
3634 /*
3635 * Allocate space for saving current Host Controller
3636 * registers. Don't do any recovery if allocation
3637 * fails.
3638 */
3639 ehci_save_regs = (ehci_regs_t *)
3640 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3641
3642 if (ehci_save_regs == NULL) {
3643 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3644 "ehci_do_soft_reset: kmem_zalloc failed");
3645
3646 return (USB_FAILURE);
3647 }
3648
3649 /* Save current ehci registers */
3650 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3651 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3652 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3653 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3654 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3655 ehci_save_regs->ehci_periodic_list_base =
3656 Get_OpReg(ehci_periodic_list_base);
3657
3658 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3659 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3660
3661 /* Disable all list processing and interrupts */
3662 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3663 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3664
3665 /* Disable all EHCI interrupts */
3666 Set_OpReg(ehci_interrupt, 0);
3667
	/* Wait for a few milliseconds */
3669 drv_usecwait(EHCI_SOF_TIMEWAIT);
3670
3671 /* Do light soft reset of ehci host controller */
3672 Set_OpReg(ehci_command,
3673 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3674
3675 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3676 "ehci_do_soft_reset: Reset in progress");
3677
3678 /* Wait for reset to complete */
3679 drv_usecwait(EHCI_RESET_TIMEWAIT);
3680
3681 /*
3682 * Restore previous saved EHCI register value
3683 * into the current EHCI registers.
3684 */
3685 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3686 ehci_save_regs->ehci_ctrl_segment);
3687
3688 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3689 ehci_save_regs->ehci_periodic_list_base);
3690
3691 Set_OpReg(ehci_async_list_addr, (uint32_t)
3692 ehci_save_regs->ehci_async_list_addr);
3693
3694 /*
3695 * For some reason this register might get nulled out by
	 * the ULi M1575 South Bridge. To work around the hardware
	 * problem, check the value after the write and retry if the
	 * last write failed.
3699 */
3700 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3701 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3702 (ehci_save_regs->ehci_async_list_addr !=
3703 Get_OpReg(ehci_async_list_addr))) {
3704 int retry = 0;
3705
3706 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3707 ehci_save_regs->ehci_async_list_addr, retry);
3708 if (retry >= EHCI_MAX_RETRY) {
3709 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3710 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3711 " ASYNCLISTADDR write failed.");
3712
3713 return (USB_FAILURE);
3714 }
3715 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3716 "ehci_do_soft_reset: ASYNCLISTADDR "
3717 "write failed, retry=%d", retry);
3718 }
3719
3720 Set_OpReg(ehci_config_flag, (uint32_t)
3721 ehci_save_regs->ehci_config_flag);
3722
3723 /* Enable both Asynchronous and Periodic Schedule if necessary */
3724 ehci_toggle_scheduler(ehcip);
3725
3726 /*
	 * Set ehci_interrupt to enable all interrupts except the
	 * Root Hub Status change interrupt.
3729 */
3730 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3731 EHCI_INTR_FRAME_LIST_ROLLOVER |
3732 EHCI_INTR_USB_ERROR |
3733 EHCI_INTR_USB);
3734
3735 /*
3736 * Deallocate the space that allocated for saving
3737 * HC registers.
3738 */
3739 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3740
3741 /*
	 * Set the desired interrupt threshold, frame list size (if
	 * applicable) and turn on the EHCI host controller.
3744 */
3745 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3746 ~EHCI_CMD_INTR_THRESHOLD) |
3747 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3748
3749 /* Wait 10ms for EHCI to start sending SOF */
3750 drv_usecwait(EHCI_RESET_TIMEWAIT);
3751
3752 /*
	 * Get the current usb frame number before waiting for a
	 * few milliseconds.
3755 */
3756 before_frame_number = ehci_get_current_frame_number(ehcip);
3757
	/* Wait for a few milliseconds */
3759 drv_usecwait(EHCI_SOF_TIMEWAIT);
3760
3761 /*
	 * Get the current usb frame number after waiting for a
	 * few milliseconds.
3764 */
3765 after_frame_number = ehci_get_current_frame_number(ehcip);
3766
3767 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3768 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3769 "After Frame Number 0x%llx",
3770 (unsigned long long)before_frame_number,
3771 (unsigned long long)after_frame_number);
3772
3773 if ((after_frame_number <= before_frame_number) &&
3774 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3775
3776 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3777 "ehci_do_soft_reset: Soft reset failed");
3778
3779 return (USB_FAILURE);
3780 }
3781
3782 return (USB_SUCCESS);
3783 }
3784
3785
3786 /*
3787 * ehci_get_xfer_attrs:
3788 *
3789 * Get the attributes of a particular xfer.
3790 *
3791 * NOTE: This function is also called from POLLED MODE.
3792 */
3793 usb_req_attrs_t
3794 ehci_get_xfer_attrs(
3795 ehci_state_t *ehcip,
3796 ehci_pipe_private_t *pp,
3797 ehci_trans_wrapper_t *tw)
3798 {
3799 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3800 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3801
3802 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3803 "ehci_get_xfer_attrs:");
3804
3805 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3806 case USB_EP_ATTR_CONTROL:
3807 attrs = ((usb_ctrl_req_t *)
3808 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3809 break;
3810 case USB_EP_ATTR_BULK:
3811 attrs = ((usb_bulk_req_t *)
3812 tw->tw_curr_xfer_reqp)->bulk_attributes;
3813 break;
3814 case USB_EP_ATTR_INTR:
3815 attrs = ((usb_intr_req_t *)
3816 tw->tw_curr_xfer_reqp)->intr_attributes;
3817 break;
3818 }
3819
3820 return (attrs);
3821 }
3822
3823
3824 /*
3825 * ehci_get_current_frame_number:
3826 *
3827 * Get the current software based usb frame number.
3828 */
3829 usb_frame_number_t
3830 ehci_get_current_frame_number(ehci_state_t *ehcip)
3831 {
3832 usb_frame_number_t usb_frame_number;
3833 usb_frame_number_t ehci_fno, micro_frame_number;
3834
3835 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3836
3837 ehci_fno = ehcip->ehci_fno;
3838 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3839
3840 	/*
3841 	 * Calculate the current software-based usb frame number.
3842 	 *
3843 	 * This code accounts for the fact that the frame number is
3844 	 * updated by the Host Controller before the ehci driver
3845 	 * gets a FrameListRollover interrupt that will adjust the
3846 	 * upper bits of the frame number.
3847 	 *
3848 	 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3849 	 */
3850 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3851 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3852 ehci_fno) & 0x2000);
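	/*
	 * Illustrative example (the values below are hypothetical):
	 * assume ehci_fno is 0x2000 and the hardware frame index has
	 * just wrapped to 0x0010 while the FrameListRollover interrupt
	 * is still pending.  Bit 13 of the two values differs, so the
	 * correction term adds 0x2000:
	 *
	 *	(0x0010 & 0x1FFF) | 0x2000	= 0x2010
	 *	((0x0010 ^ 0x2000) & 0x2000)	= 0x2000
	 *	micro_frame_number		= 0x4010
	 *
	 * which is consistent with the count the software will carry
	 * once the pending rollover interrupt has been handled.
	 */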
3853
3854 	/*
3855 	 * A micro frame is 125 usec long.  Eight micro frames
3856 	 * are equivalent to one millisecond, which is one usb
3857 	 * frame.
3858 	 */
3859 usb_frame_number = micro_frame_number >>
3860 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
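	/*
	 * For example, continuing the hypothetical values above and
	 * assuming EHCI_uFRAMES_PER_USB_FRAME_SHIFT is 3 (eight micro
	 * frames per frame), a micro frame number of 0x4010 yields a
	 * usb frame number of 0x802.
	 */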
3861
3862 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3863 "ehci_get_current_frame_number: "
3864 "Current usb uframe number = 0x%llx "
3865 "Current usb frame number = 0x%llx",
3866 (unsigned long long)micro_frame_number,
3867 (unsigned long long)usb_frame_number);
3868
3869 return (usb_frame_number);
3870 }
3871
3872
3873 /*
3874 * ehci_cpr_cleanup:
3875 *
3876  * Clean up ehci state and other ehci-specific information across
3877  * Checkpoint/Resume (CPR).
3878 */
3879 static void
3880 ehci_cpr_cleanup(ehci_state_t *ehcip)
3881 {
3882 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3883
3884 /* Reset software part of usb frame number */
3885 ehcip->ehci_fno = 0;
3886 }
3887
3888
3889 /*
3890 * ehci_wait_for_sof:
3891 *
3892  * Wait for a couple of SOFs by checking that the usb frame number advances.
3893 */
3894 int
3895 ehci_wait_for_sof(ehci_state_t *ehcip)
3896 {
3897 usb_frame_number_t before_frame_number, after_frame_number;
3898 int error = USB_SUCCESS;
3899
3900 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3901 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3902
3903 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3904
3905 error = ehci_state_is_operational(ehcip);
3906
3907 if (error != USB_SUCCESS) {
3908
3909 return (error);
3910 }
3911
3912 /* Get the current usb frame number before waiting for two SOFs */
3913 before_frame_number = ehci_get_current_frame_number(ehcip);
3914
3915 mutex_exit(&ehcip->ehci_int_mutex);
3916
3917 	/* Wait for a few milliseconds */
3918 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3919
3920 mutex_enter(&ehcip->ehci_int_mutex);
3921
3922 	/* Get the current usb frame number after waking up */
3923 after_frame_number = ehci_get_current_frame_number(ehcip);
3924
3925 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3926 "ehci_wait_for_sof: framenumber: before 0x%llx "
3927 "after 0x%llx",
3928 (unsigned long long)before_frame_number,
3929 (unsigned long long)after_frame_number);
3930
3931 	/* Return failure if the usb frame number has not changed */
3932 if (after_frame_number <= before_frame_number) {
3933
3934 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
3935
3936 USB_DPRINTF_L0(PRINT_MASK_LISTS,
3937 ehcip->ehci_log_hdl, "No SOF interrupts");
3938
3939 /* Set host controller soft state to error */
3940 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
3941
3942 return (USB_FAILURE);
3943 }
3944
3945 }
3946
3947 return (USB_SUCCESS);
3948 }
3949
3950 /*
3951  * Toggle the async/periodic schedule based on the opened pipe count.
3952  * During pipe cleanup (in the pipe reset case), the pipe's QH is
3953  * temporarily disabled, but the TW on the pipe is not freed.  In this
3954  * case the async/periodic schedule must be disabled on some
3955  * incompatible hardware; otherwise the hardware would overwrite the
3956  * software's configuration of the QH.
3957  */
3958 void
3959 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
3960 {
3961 uint_t temp_reg, cmd_reg;
3962
3963 cmd_reg = Get_OpReg(ehci_command);
3964 temp_reg = cmd_reg;
3965
3966 /*
3967 * Enable/Disable asynchronous scheduler, and
3968 * turn on/off async list door bell
3969 */
3970 if (ehcip->ehci_open_async_count) {
3971 if ((ehcip->ehci_async_req_count > 0) &&
3972 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
3973 /*
3974 * For some reason this address might get nulled out by
3975 * the ehci chip. Set it here just in case it is null.
3976 */
3977 Set_OpReg(ehci_async_list_addr,
3978 ehci_qh_cpu_to_iommu(ehcip,
3979 ehcip->ehci_head_of_async_sched_list));
3980
3981 /*
3982 * For some reason this register might get nulled out by
3983 * the Uli M1575 Southbridge. To workaround the HW
3984 * problem, check the value after write and retry if the
3985 * last write fails.
3986 *
3987 * If the ASYNCLISTADDR remains "stuck" after
3988 * EHCI_MAX_RETRY retries, then the M1575 is broken
3989 * and is stuck in an inconsistent state and is about
3990 * to crash the machine with a trn_oor panic when it
3991 * does a DMA read from 0x0. It is better to panic
3992 * now rather than wait for the trn_oor crash; this
3993 * way Customer Service will have a clean signature
3994 * that indicts the M1575 chip rather than a
3995 * mysterious and hard-to-diagnose trn_oor panic.
3996 */
3997 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3998 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3999 (ehci_qh_cpu_to_iommu(ehcip,
4000 ehcip->ehci_head_of_async_sched_list) !=
4001 Get_OpReg(ehci_async_list_addr))) {
4002 int retry = 0;
4003
4004 Set_OpRegRetry(ehci_async_list_addr,
4005 ehci_qh_cpu_to_iommu(ehcip,
4006 ehcip->ehci_head_of_async_sched_list),
4007 retry);
4008 if (retry >= EHCI_MAX_RETRY)
4009 cmn_err(CE_PANIC,
4010 "ehci_toggle_scheduler_on_pipe: "
4011 "ASYNCLISTADDR write failed.");
4012
4013 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4014 ehcip->ehci_log_hdl,
4015 "ehci_toggle_scheduler_on_pipe:"
4016 " ASYNCLISTADDR write failed, retry=%d",
4017 retry);
4018 }
4019
4020 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4021 }
4022 } else {
4023 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4024 }
4025
4026 if (ehcip->ehci_open_periodic_count) {
4027 if ((ehcip->ehci_periodic_req_count > 0) &&
4028 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4029 /*
4030 			 * For some reason this address gets nulled out by
4031 * the ehci chip. Set it here just in case it is null.
4032 */
4033 Set_OpReg(ehci_periodic_list_base,
4034 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4035 0xFFFFF000));
4036 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4037 }
4038 } else {
4039 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4040 }
4041
4042 /* Just an optimization */
4043 if (temp_reg != cmd_reg) {
4044 Set_OpReg(ehci_command, cmd_reg);
4045 }
4046 }
4047
4048
4049 /*
4050 * ehci_toggle_scheduler:
4051 *
4052  * Turn the schedules on or off based on the pipe open count.
4053 */
4054 void
4055 ehci_toggle_scheduler(ehci_state_t *ehcip)
4056 {
4057 uint_t temp_reg, cmd_reg;
4058
4059 	/*
4060 	 * As a performance optimization, the schedule enable bits only need
4061 	 * to change when the async or periodic request count is 0 or 1.
4062 	 *
4063 	 * The relevant bits are already correct if
4064 	 *	both the async and periodic req counts are > 1,
4065 	 *	OR the async req count is > 1 and no periodic pipe is open,
4066 	 *	OR the periodic req count is > 1 and no async pipe is open.
4067 	 */
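	/*
	 * Two illustrative cases (the counts are hypothetical): with an
	 * async req count of 2 and a periodic req count of 5, the first
	 * condition below holds and the routine returns early.  With an
	 * async req count of 3, a periodic req count of 0 and an open
	 * periodic pipe count of 2, none of the conditions hold, so the
	 * code falls through and clears the periodic schedule enable bit.
	 */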
4068 if (((ehcip->ehci_async_req_count > 1) &&
4069 (ehcip->ehci_periodic_req_count > 1)) ||
4070 ((ehcip->ehci_async_req_count > 1) &&
4071 (ehcip->ehci_open_periodic_count == 0)) ||
4072 ((ehcip->ehci_periodic_req_count > 1) &&
4073 (ehcip->ehci_open_async_count == 0))) {
4074 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4075 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4076 "async/periodic bits no need to change");
4077
4078 return;
4079 }
4080
4081 cmd_reg = Get_OpReg(ehci_command);
4082 temp_reg = cmd_reg;
4083
4084 /*
4085 * Enable/Disable asynchronous scheduler, and
4086 * turn on/off async list door bell
4087 */
4088 if (ehcip->ehci_async_req_count > 1) {
4089 		/* the async bit is already enabled */
4090 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4091 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4092 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4093 } else if (ehcip->ehci_async_req_count == 1) {
4094 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4095 /*
4096 * For some reason this address might get nulled out by
4097 * the ehci chip. Set it here just in case it is null.
4098 * If it's not null, we should not reset the
4099 * ASYNCLISTADDR, because it's updated by hardware to
4100 * point to the next queue head to be executed.
4101 */
4102 if (!Get_OpReg(ehci_async_list_addr)) {
4103 Set_OpReg(ehci_async_list_addr,
4104 ehci_qh_cpu_to_iommu(ehcip,
4105 ehcip->ehci_head_of_async_sched_list));
4106 }
4107
4108 /*
4109 * For some reason this register might get nulled out by
4110 * the Uli M1575 Southbridge. To workaround the HW
4111 * problem, check the value after write and retry if the
4112 * last write fails.
4113 *
4114 * If the ASYNCLISTADDR remains "stuck" after
4115 * EHCI_MAX_RETRY retries, then the M1575 is broken
4116 * and is stuck in an inconsistent state and is about
4117 * to crash the machine with a trn_oor panic when it
4118 * does a DMA read from 0x0. It is better to panic
4119 * now rather than wait for the trn_oor crash; this
4120 * way Customer Service will have a clean signature
4121 * that indicts the M1575 chip rather than a
4122 * mysterious and hard-to-diagnose trn_oor panic.
4123 */
4124 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4125 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4126 (ehci_qh_cpu_to_iommu(ehcip,
4127 ehcip->ehci_head_of_async_sched_list) !=
4128 Get_OpReg(ehci_async_list_addr))) {
4129 int retry = 0;
4130
4131 Set_OpRegRetry(ehci_async_list_addr,
4132 ehci_qh_cpu_to_iommu(ehcip,
4133 ehcip->ehci_head_of_async_sched_list),
4134 retry);
4135 if (retry >= EHCI_MAX_RETRY)
4136 cmn_err(CE_PANIC,
4137 "ehci_toggle_scheduler: "
4138 "ASYNCLISTADDR write failed.");
4139
4140 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4141 ehcip->ehci_log_hdl,
4142 "ehci_toggle_scheduler: ASYNCLISTADDR "
4143 "write failed, retry=%d", retry);
4144 }
4145 }
4146 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4147 } else {
4148 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4149 }
4150
4151 if (ehcip->ehci_periodic_req_count > 1) {
4152 		/* the periodic bit is already enabled. */
4153 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4154 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4155 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4156 } else if (ehcip->ehci_periodic_req_count == 1) {
4157 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4158 /*
4159 			 * For some reason this address gets nulled out by
4160 * the ehci chip. Set it here just in case it is null.
4161 */
4162 Set_OpReg(ehci_periodic_list_base,
4163 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4164 0xFFFFF000));
4165 }
4166 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4167 } else {
4168 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4169 }
4170
4171 /* Just an optimization */
4172 if (temp_reg != cmd_reg) {
4173 Set_OpReg(ehci_command, cmd_reg);
4174
4175 /* To make sure the command register is updated correctly */
4176 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4177 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4178 int retry = 0;
4179
4180 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4181 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4182 ehcip->ehci_log_hdl,
4183 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4184 retry);
4185 }
4186
4187 }
4188 }
4189
4190 /*
4191 * ehci print functions
4192 */
4193
4194 /*
4195 * ehci_print_caps:
4196 */
4197 void
4198 ehci_print_caps(ehci_state_t *ehcip)
4199 {
4200 uint_t i;
4201
4202 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4203 "\n\tUSB 2.0 Host Controller Characteristics\n");
4204
4205 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4206 "Caps Length: 0x%x Version: 0x%x\n",
4207 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4208
4209 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4210 "Structural Parameters\n");
4211 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4212 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4213 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4214 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4215 "No of Classic host controllers: 0x%x",
4216 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4217 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4218 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4219 "No of ports per Classic host controller: 0x%x",
4220 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4221 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4222 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4223 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4224 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4225 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4226 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4227 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4228 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4229 "No of root hub ports: 0x%x\n",
4230 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4231
4232 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4233 "Capability Parameters\n");
4234 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4235 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4236 EHCI_HCC_EECP) ? "Yes" : "No");
4237 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4238 "Isoch schedule threshold: 0x%x",
4239 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4240 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4241 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4242 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4243 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4244 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4245 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4246 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4247 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4248 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4249
4250 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4251 "Classic Port Route Description");
4252
4253 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4254 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4255 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4256 }
4257 }
4258
4259
4260 /*
4261 * ehci_print_regs:
4262 */
4263 void
4264 ehci_print_regs(ehci_state_t *ehcip)
4265 {
4266 uint_t i;
4267
4268 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4269 "\n\tEHCI%d Operational Registers\n",
4270 ddi_get_instance(ehcip->ehci_dip));
4271
4272 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4273 "Command: 0x%x Status: 0x%x",
4274 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4275 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4276 "Interrupt: 0x%x Frame Index: 0x%x",
4277 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4278 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4279 "Control Segment: 0x%x Periodic List Base: 0x%x",
4280 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4281 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4282 "Async List Addr: 0x%x Config Flag: 0x%x",
4283 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4284
4285 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4286 "Root Hub Port Status");
4287
4288 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4290 "\tPort Status 0x%x: 0x%x ", i,
4291 Get_OpReg(ehci_rh_port_status[i]));
4292 }
4293 }
4294
4295
4296 /*
4297 * ehci_print_qh:
4298 */
4299 void
4300 ehci_print_qh(
4301 ehci_state_t *ehcip,
4302 ehci_qh_t *qh)
4303 {
4304 uint_t i;
4305
4306 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4307 "ehci_print_qh: qh = 0x%p", (void *)qh);
4308
4309 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4310 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4311 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4312 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4313 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4314 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4315 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4316 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4317 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4318 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4319 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4320 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4321 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4322 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4323
4324 for (i = 0; i < 5; i++) {
4325 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4326 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4327 }
4328
4329 for (i = 0; i < 5; i++) {
4330 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4331 "\tqh_buf_high[%d]: 0x%x ",
4332 i, Get_QH(qh->qh_buf_high[i]));
4333 }
4334
4335 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4336 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4337 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4338 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4339 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4340 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4341 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4342 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4343 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4344 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4345 }
4346
4347
4348 /*
4349 * ehci_print_qtd:
4350 */
4351 void
4352 ehci_print_qtd(
4353 ehci_state_t *ehcip,
4354 ehci_qtd_t *qtd)
4355 {
4356 uint_t i;
4357
4358 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4359 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4360
4361 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4362 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4363 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4364 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4365 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4366 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4367
4368 for (i = 0; i < 5; i++) {
4369 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4370 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4371 }
4372
4373 for (i = 0; i < 5; i++) {
4374 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4375 "\tqtd_buf_high[%d]: 0x%x ",
4376 i, Get_QTD(qtd->qtd_buf_high[i]));
4377 }
4378
4379 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4380 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4381 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4382 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4383 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4384 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4385 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4386 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4387 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4388 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4389 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4390 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4391 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4392 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4393 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4394 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4395 }
4396
4397 /*
4398 * ehci kstat functions
4399 */
4400
4401 /*
4402 * ehci_create_stats:
4403 *
4404 * Allocate and initialize the ehci kstat structures
4405 */
4406 void
4407 ehci_create_stats(ehci_state_t *ehcip)
4408 {
4409 char kstatname[KSTAT_STRLEN];
4410 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4411 char *usbtypes[USB_N_COUNT_KSTATS] =
4412 {"ctrl", "isoch", "bulk", "intr"};
4413 uint_t instance = ehcip->ehci_instance;
4414 ehci_intrs_stats_t *isp;
4415 int i;
4416
4417 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4418 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4419 dname, instance);
4420 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4421 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4422 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4423 KSTAT_FLAG_PERSISTENT);
4424
4425 if (EHCI_INTRS_STATS(ehcip)) {
4426 isp = EHCI_INTRS_STATS_DATA(ehcip);
4427 kstat_named_init(&isp->ehci_sts_total,
4428 "Interrupts Total", KSTAT_DATA_UINT64);
4429 kstat_named_init(&isp->ehci_sts_not_claimed,
4430 "Not Claimed", KSTAT_DATA_UINT64);
4431 kstat_named_init(&isp->ehci_sts_async_sched_status,
4432 "Async schedule status", KSTAT_DATA_UINT64);
4433 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4434 "Periodic sched status", KSTAT_DATA_UINT64);
4435 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4436 "Empty async schedule", KSTAT_DATA_UINT64);
4437 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4438 "Host controller Halted", KSTAT_DATA_UINT64);
4439 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4440 "Intr on async advance", KSTAT_DATA_UINT64);
4441 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4442 "Host system error", KSTAT_DATA_UINT64);
4443 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4444 "Frame list rollover", KSTAT_DATA_UINT64);
4445 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4446 "Port change detect", KSTAT_DATA_UINT64);
4447 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4448 "USB error interrupt", KSTAT_DATA_UINT64);
4449 kstat_named_init(&isp->ehci_sts_usb_intr,
4450 "USB interrupt", KSTAT_DATA_UINT64);
4451
4452 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4453 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4454 kstat_install(EHCI_INTRS_STATS(ehcip));
4455 }
4456 }
4457
4458 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4459 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4460 dname, instance);
4461 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4462 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4463 KSTAT_FLAG_PERSISTENT);
4464
4465 if (EHCI_TOTAL_STATS(ehcip)) {
4466 kstat_install(EHCI_TOTAL_STATS(ehcip));
4467 }
4468 }
4469
4470 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4471 if (ehcip->ehci_count_stats[i] == NULL) {
4472 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4473 dname, instance, usbtypes[i]);
4474 ehcip->ehci_count_stats[i] = kstat_create("usba",
4475 instance, kstatname, "usb_byte_count",
4476 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4477
4478 if (ehcip->ehci_count_stats[i]) {
4479 kstat_install(ehcip->ehci_count_stats[i]);
4480 }
4481 }
4482 }
4483 }
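/*
 * The kstats created above are exported under the "usba" module and can
 * be examined from userland with kstat(1M), e.g. "kstat -m usba"
 * (illustrative invocation).
 */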
4484
4485
4486 /*
4487 * ehci_destroy_stats:
4488 *
4489 * Clean up ehci kstat structures
4490 */
4491 void
4492 ehci_destroy_stats(ehci_state_t *ehcip)
4493 {
4494 int i;
4495
4496 if (EHCI_INTRS_STATS(ehcip)) {
4497 kstat_delete(EHCI_INTRS_STATS(ehcip));
4498 EHCI_INTRS_STATS(ehcip) = NULL;
4499 }
4500
4501 if (EHCI_TOTAL_STATS(ehcip)) {
4502 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4503 EHCI_TOTAL_STATS(ehcip) = NULL;
4504 }
4505
4506 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4507 if (ehcip->ehci_count_stats[i]) {
4508 kstat_delete(ehcip->ehci_count_stats[i]);
4509 ehcip->ehci_count_stats[i] = NULL;
4510 }
4511 }
4512 }
4513
4514
4515 /*
4516 * ehci_do_intrs_stats:
4517 *
4518  * Record ehci interrupt status statistics
4519 */
4520 void
4521 ehci_do_intrs_stats(
4522 ehci_state_t *ehcip,
4523 int val)
4524 {
4525 if (EHCI_INTRS_STATS(ehcip)) {
4526 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4527 switch (val) {
4528 case EHCI_STS_ASYNC_SCHED_STATUS:
4529 EHCI_INTRS_STATS_DATA(ehcip)->
4530 ehci_sts_async_sched_status.value.ui64++;
4531 break;
4532 case EHCI_STS_PERIODIC_SCHED_STATUS:
4533 EHCI_INTRS_STATS_DATA(ehcip)->
4534 ehci_sts_periodic_sched_status.value.ui64++;
4535 break;
4536 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4537 EHCI_INTRS_STATS_DATA(ehcip)->
4538 ehci_sts_empty_async_schedule.value.ui64++;
4539 break;
4540 case EHCI_STS_HOST_CTRL_HALTED:
4541 EHCI_INTRS_STATS_DATA(ehcip)->
4542 ehci_sts_host_ctrl_halted.value.ui64++;
4543 break;
4544 case EHCI_STS_ASYNC_ADVANCE_INTR:
4545 EHCI_INTRS_STATS_DATA(ehcip)->
4546 ehci_sts_async_advance_intr.value.ui64++;
4547 break;
4548 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4549 EHCI_INTRS_STATS_DATA(ehcip)->
4550 ehci_sts_host_system_error_intr.value.ui64++;
4551 break;
4552 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4553 EHCI_INTRS_STATS_DATA(ehcip)->
4554 ehci_sts_frm_list_rollover_intr.value.ui64++;
4555 break;
4556 case EHCI_STS_RH_PORT_CHANGE_INTR:
4557 EHCI_INTRS_STATS_DATA(ehcip)->
4558 ehci_sts_rh_port_change_intr.value.ui64++;
4559 break;
4560 case EHCI_STS_USB_ERROR_INTR:
4561 EHCI_INTRS_STATS_DATA(ehcip)->
4562 ehci_sts_usb_error_intr.value.ui64++;
4563 break;
4564 case EHCI_STS_USB_INTR:
4565 EHCI_INTRS_STATS_DATA(ehcip)->
4566 ehci_sts_usb_intr.value.ui64++;
4567 break;
4568 default:
4569 EHCI_INTRS_STATS_DATA(ehcip)->
4570 ehci_sts_not_claimed.value.ui64++;
4571 break;
4572 }
4573 }
4574 }
4575
4576
4577 /*
4578 * ehci_do_byte_stats:
4579 *
4580  * Record ehci data transfer statistics
4581 */
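/*
 * For example (an illustrative case), a completed 512-byte bulk IN
 * transfer increments the read count and adds 512 bytes to the
 * bytes-read counter on both the total and the bulk byte-count kstats.
 */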
4582 void
4583 ehci_do_byte_stats(
4584 ehci_state_t *ehcip,
4585 size_t len,
4586 uint8_t attr,
4587 uint8_t addr)
4588 {
4589 uint8_t type = attr & USB_EP_ATTR_MASK;
4590 uint8_t dir = addr & USB_EP_DIR_MASK;
4591
4592 if (dir == USB_EP_DIR_IN) {
4593 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4594 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4595 switch (type) {
4596 case USB_EP_ATTR_CONTROL:
4597 EHCI_CTRL_STATS(ehcip)->reads++;
4598 EHCI_CTRL_STATS(ehcip)->nread += len;
4599 break;
4600 case USB_EP_ATTR_BULK:
4601 EHCI_BULK_STATS(ehcip)->reads++;
4602 EHCI_BULK_STATS(ehcip)->nread += len;
4603 break;
4604 case USB_EP_ATTR_INTR:
4605 EHCI_INTR_STATS(ehcip)->reads++;
4606 EHCI_INTR_STATS(ehcip)->nread += len;
4607 break;
4608 case USB_EP_ATTR_ISOCH:
4609 EHCI_ISOC_STATS(ehcip)->reads++;
4610 EHCI_ISOC_STATS(ehcip)->nread += len;
4611 break;
4612 }
4613 } else if (dir == USB_EP_DIR_OUT) {
4614 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4615 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4616 switch (type) {
4617 case USB_EP_ATTR_CONTROL:
4618 EHCI_CTRL_STATS(ehcip)->writes++;
4619 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4620 break;
4621 case USB_EP_ATTR_BULK:
4622 EHCI_BULK_STATS(ehcip)->writes++;
4623 EHCI_BULK_STATS(ehcip)->nwritten += len;
4624 break;
4625 case USB_EP_ATTR_INTR:
4626 EHCI_INTR_STATS(ehcip)->writes++;
4627 EHCI_INTR_STATS(ehcip)->nwritten += len;
4628 break;
4629 case USB_EP_ATTR_ISOCH:
4630 EHCI_ISOC_STATS(ehcip)->writes++;
4631 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4632 break;
4633 }
4634 }
4635 }