}

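/*
 * Report the physical and virtual address widths (in bits) discovered for
 * this CPU.  A NULL cpu means the current CPU; either output pointer may be
 * NULL if the caller does not need that value.
 */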
void
cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
{
	struct cpuid_info *cpi;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, CPUID_PASS_BASIC));

	if (pabits)
		*pabits = cpi->cpi_pabits;
	if (vabits)
		*vabits = cpi->cpi_vabits;
}

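/*
 * Return the size required for an XSAVE area: the maximum save-area size
 * cached in cpuid_info0, but never smaller than struct xsave_state.
 */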
size_t
cpuid_get_xsave_size(void)
{
	return (MAX(cpuid_info0.cpi_xsave.xsav_max_size,
	    sizeof (struct xsave_state)));
}

/*
 * Export information about known offsets to the kernel. We only care about
 * things we have actually enabled support for in %xcr0.
 */
void
cpuid_get_xsave_info(uint64_t bit, size_t *sizep, size_t *offp)
{
	size_t size, off;

	VERIFY3U(bit & xsave_bv_all, !=, 0);

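	/*
	 * Callers may pass NULL for either output; point it at a local
	 * scratch variable so the assignments below can be unconditional.
	 */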
	if (sizep == NULL)
		sizep = &size;
	if (offp == NULL)
		offp = &off;

	switch (bit) {
	case XFEATURE_LEGACY_FP:
	case XFEATURE_SSE:
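		/*
		 * The x87 and SSE state always occupy the legacy fxsave area
		 * at the very start of the save region.
		 */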
		*sizep = sizeof (struct fxsave_state);
		*offp = 0;
		break;
	case XFEATURE_AVX:
		*sizep = cpuid_info0.cpi_xsave.ymm_size;
		*offp = cpuid_info0.cpi_xsave.ymm_offset;
		break;
	case XFEATURE_AVX512_OPMASK:
		*sizep = cpuid_info0.cpi_xsave.opmask_size;
		*offp = cpuid_info0.cpi_xsave.opmask_offset;
		break;
	case XFEATURE_AVX512_ZMM:
		*sizep = cpuid_info0.cpi_xsave.zmmlo_size;
		*offp = cpuid_info0.cpi_xsave.zmmlo_offset;
		break;
	case XFEATURE_AVX512_HI_ZMM:
		*sizep = cpuid_info0.cpi_xsave.zmmhi_size;
		*offp = cpuid_info0.cpi_xsave.zmmhi_offset;
		break;
	default:
		panic("asked for unsupported xsave feature: 0x%lx", bit);
	}
}

/*
 * Return true if the CPUs on this system require 'pointer clearing' for the
 * floating point error pointer exception handling. In the past, this has been
 * true for all AMD K7 & K8 CPUs, although newer AMD CPUs have been changed to
 * behave the same as Intel. This is checked via the CPUID_AMD_EBX_ERR_PTR_ZERO
 * feature bit and is reflected in the cpi_fp_amd_save member.
 */
boolean_t
cpuid_need_fp_excp_handling(void)
{
	return (cpuid_info0.cpi_vendor == X86_VENDOR_AMD &&
	    cpuid_info0.cpi_fp_amd_save != 0);
}

/*
 * Returns the number of data TLB entries for a corresponding
 * pagesize. If it can't be computed, or isn't known, the
 * routine returns zero. If you ask about an architecturally
 * impossible pagesize, the routine will panic (so that the
 * hat implementor knows that things are inconsistent.)
 */
uint_t
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	if (cpu == NULL)
		cpu = CPU;