 *	6	0x60-0x7f	0x40-0x5f
 *	7,8,9	0x80-0x8f	0x60-0x6f
 *	10	0x90-0x9f	0x70-0x7f
 *	11	0xa0-0xaf	0x80-0x8f
 *	...	...
 *	15	0xe0-0xef	0xc0-0xcf
 *	15	0xf0-0xff	0xd0-0xdf
 */
uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
	3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
};
/*
 * The IPL of an ISR at vector X is apic_vectortoipl[X >> 4].
 * Note that X is the vector as passed into intr_enter, which is the
 * programmed vector minus 0x20 (APIC_BASE_VECT).
 */
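
/*
 * For example, a vector programmed at 0x83 is passed to intr_enter as
 * 0x83 - 0x20 = 0x63, so its IPL is apic_vectortoipl[0x63 >> 4] =
 * apic_vectortoipl[6] = 9, matching the 0x80-0x8f row of the table above.
 */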

uchar_t apic_ipltopri[MAXIPL + 1];	/* unix ipl to apic pri */
	/* The taskpri to be programmed into apic to mask given ipl */

#if defined(__amd64)
uchar_t apic_cr8pri[MAXIPL + 1];	/* unix ipl to cr8 pri */
#endif

/*
 * Correlation of the hardware vector to the IPL in use, initialized
 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 * connected to errata-stricken IOAPICs.
 */
uchar_t apic_ipls[APIC_AVAIL_VECTOR];

/*
 * Patchable global variables.
 */
int apic_enable_hwsoftint = 0;	/* 0 - disable, 1 - enable */
int apic_enable_bind_log = 1;	/* 1 - display interrupt binding log */

/*
 * Local static data
 */
static struct psm_ops apic_ops = {
	apic_probe,

/* ... rest of the apic_ops initializer and the opening of apic_init() elided ... */

	psm_get_ioapicid = apic_get_ioapicid;
	psm_get_localapicid = apic_get_localapicid;
	psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;

	apic_ipltopri[0] = APIC_VECTOR_PER_IPL;	/* leave 0 for idle */
	for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
		if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
		    (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
			/* get to highest vector at the same ipl */
			continue;
		for (; j <= apic_vectortoipl[i]; j++) {
			apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
			    APIC_BASE_VECT;
		}
	}
	for (; j < MAXIPL + 1; j++)
		/* fill up any empty ipltopri slots */
		apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
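
	/*
	 * A sketch of the result (assuming j starts at 1 and that
	 * APIC_IPL_SHIFT is 4, i.e. one priority class per
	 * APIC_VECTOR_PER_IPL = 16 vectors), given the apic_vectortoipl[]
	 * above:
	 *
	 *	ipl:	0    1-3  4    5    6    7-9  10   11   12   13   14   15
	 *	tpr:	0x10 0x20 0x30 0x50 0x70 0x80 0x90 0xa0 0xb0 0xc0 0xd0 0xf0
	 *
	 * The task priority for an ipl is the class of the highest vector
	 * assigned to that ipl, so programming it masks that ipl and all
	 * lower ipls while leaving higher ipls deliverable.
	 */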
	apic_init_common();
#if defined(__amd64)
	/*
	 * Make cpu-specific interrupt info point to cr8pri vector
	 */
	for (i = 0; i <= MAXIPL; i++)
		apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT;
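	/*
	 * %cr8 holds only the priority class, i.e. the upper four bits of
	 * the TPR, so apic_cr8pri[] is simply apic_ipltopri[] shifted down:
	 * an ipl whose task priority is 0x50 gets a cr8 value of 5, and
	 * writing 5 to %cr8 is equivalent to writing 0x50 to the
	 * memory-mapped task register.
	 */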
	CPU->cpu_pri_data = apic_cr8pri;
#else
	if (cpuid_have_cr8access(CPU))
		apic_have_32bit_cr8 = 1;
#endif	/* __amd64 */
}

static void
apic_init_intr(void)
{
	processorid_t cpun = psm_get_cpu_id();
	uint_t nlvt;
	uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;

	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);

	if (apic_mode == LOCAL_APIC) {
		/*
		 * We are running APIC in MMIO mode.
		 */
		if (apic_flat_model) {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,
			    APIC_FLAT_MODEL);
		} else {
			apic_reg_ops->apic_write(APIC_FORMAT_REG,

/* ... rest of apic_init_intr() and the opening of the interrupt-entry routine elided ... */

/*
 * ...
 * subtracts 0x20 from the vector before passing it to us.
 * (That's why APIC_BASE_VECT is 0x20.)
 */
	vector = (uchar_t)*vectorp;

	/* if interrupted by the clock, increment apic_nsec_since_boot */
	if (vector == apic_clkvect) {
		if (!apic_oneshot) {
			/* NOTE: this is not MT aware */
			apic_hrtime_stamp++;
			apic_nsec_since_boot += apic_nsec_per_intr;
			apic_hrtime_stamp++;
			last_count_read = apic_hertz_count;
			apic_redistribute_compute();
		}

		/* We avoid all the bookkeeping overhead for the clock */
		nipl = apic_ipls[vector];

		*vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
		if (apic_mode == LOCAL_APIC) {
#if defined(__amd64)
			setcr8((ulong_t)(apic_ipltopri[nipl] >>
			    APIC_IPL_SHIFT));
#else
			if (apic_have_32bit_cr8)
				setcr8((ulong_t)(apic_ipltopri[nipl] >>
				    APIC_IPL_SHIFT));
			else
				LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
				    (uint32_t)apic_ipltopri[nipl]);
#endif
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		} else {
			X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
			X2APIC_WRITE(APIC_EOI_REG, 0);
		}

		return (nipl);
	}
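
	/*
	 * For illustration: with the apic_vectortoipl[] above, a clock
	 * programmed at vector 0xd0 would arrive here as 0xb0 (the value
	 * held in apic_clkvect), giving nipl = apic_ipls[0xb0] = 14; the
	 * TPR is raised to apic_ipltopri[14] and the EOI is sent
	 * immediately, skipping the level-triggered bookkeeping applied
	 * to other interrupts below.
	 */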

	cpu_infop = &apic_cpus[psm_get_cpu_id()];

	if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
		cpu_infop->aci_spur_cnt++;
		return (APIC_INT_SPURIOUS);
	}

	/* Check if the vector we got is really what we need */
	if (apic_revector_pending) {
		/*
		 * Disable interrupts for the duration of
		 * the vector translation to prevent a self-race for
		 * the apic_revector_lock. This cannot be done
		 * in apic_xlate_vector because it is recursive and
		 * we want the vector translation to be atomic with
		 * respect to other (higher-priority) interrupts.
		 */
		iflag = intr_clear();
		vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
		    APIC_BASE_VECT;
		intr_restore(iflag);
	}

	nipl = apic_ipls[vector];
	*vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];

	if (apic_mode == LOCAL_APIC) {
#if defined(__amd64)
		setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
#else
		if (apic_have_32bit_cr8)
			setcr8((ulong_t)(apic_ipltopri[nipl] >>
			    APIC_IPL_SHIFT));
		else
			LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
			    (uint32_t)apic_ipltopri[nipl]);
#endif
	} else {
		X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
	}

	cpu_infop->aci_current[nipl] = (uchar_t)irq;
	cpu_infop->aci_curipl = (uchar_t)nipl;
	cpu_infop->aci_ISR_in_progress |= 1 << nipl;

	/*
	 * apic_level_intr could have been folded into the irq structure,
	 * but keeping it as a separate character array is more efficient
	 * in terms of cache usage, so we leave it as is.
	 */
	if (!apic_level_intr[irq]) {
		if (apic_mode == LOCAL_APIC) {
			LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
		} else {
			X2APIC_WRITE(APIC_EOI_REG, 0);
		}
	}
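
	/*
	 * Level-triggered interrupts are deliberately not EOId here; their
	 * EOI is deferred to intr_exit (see APIC_INTR_EXIT below), once the
	 * handler has had a chance to quiesce the source.
	 */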

#ifdef DEBUG
	APIC_DEBUG_BUF_PUT(vector);
	APIC_DEBUG_BUF_PUT(irq);
	APIC_DEBUG_BUF_PUT(nipl);
	APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
	if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
		drv_usecwait(apic_stretch_interrupts);

	if (apic_break_on_cpu == psm_get_cpu_id())
		apic_break();
#endif /* DEBUG */
	return (nipl);
}

/*
 * This macro is common code shared by the MMIO local apic and the
 * X2APIC local apic.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
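
/*
 * The mask arithmetic above: (2 << prev_ipl) - 1 keeps bits 0 through
 * prev_ipl and clears the rest; e.g. prev_ipl == 5 gives
 * (2 << 5) - 1 == 0x3f, clearing the aci_ISR_in_progress bits for
 * IPLs 6-15.
 */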

/*
 * Any changes made to this function must also be made to the X2APIC
 * version of intr_exit.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[prev_ipl]);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
#endif

	APIC_INTR_EXIT();
}

/*
 * Same as apic_intr_exit() except it uses MSRs rather than MMIO
 * to access local apic registers.
 */
void
x2apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
	APIC_INTR_EXIT();
}

intr_exit_fn_t
psm_intr_exit_fn(void)
{
	if (apic_mode == LOCAL_X2APIC)
		return (x2apic_intr_exit);

	return (apic_intr_exit);
}
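
/*
 * Usage sketch (hypothetical caller): platform code is expected to look
 * the routine up once and then call through the pointer, so the
 * MMIO-vs-X2APIC test is not repeated on every interrupt return:
 *
 *	intr_exit_fn_t exit_fn = psm_intr_exit_fn();
 *	...
 *	(*exit_fn)(prev_ipl, irq);
 */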

/*
 * Mask all interrupts below or equal to the given IPL.
 * Any changes made to this function must also be made to the X2APIC
 * version of setspl.
 */
static void
apic_setspl(int ipl)
{
#if defined(__amd64)
	setcr8((ulong_t)apic_cr8pri[ipl]);
#else
	if (apic_have_32bit_cr8)
		setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
	else
		apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
#endif

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * This is a workaround for the ALR QSMP P5 machine, so that
	 * interrupts have enough time to come in before the priority
	 * is raised again during the idle() loop.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}
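
/*
 * For example, apic_setspl(5) programs a task priority of
 * apic_ipltopri[5] (0x50 with the tables above), which masks vector
 * classes up through 0x5f (IPLs 1-5) while leaving class 0x60 and
 * above (IPL 6 and up) deliverable.
 */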

/*
 * X2APIC version of setspl.
 * Mask all interrupts below or equal to the given IPL.
 */
static void
x2apic_setspl(int ipl)
{
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);