Print this page
    
15254 %ymm registers not restored after signal handler
15367 x86 getfpregs() summons corrupting %xmm ghosts
15333 want x86 /proc xregs support (libc_db, libproc, mdb, etc.)
15336 want libc functions for extended ucontext_t
15334 want ps_lwphandle-specific reg routines
15328 FPU_CW_INIT mistreats reserved bit
15335 i86pc fpu_subr.c isn't really platform-specific
15332 setcontext(2) isn't actually noreturn
15331 need <sys/stdalign.h>
Change-Id: I7060aa86042dfb989f77fc3323c065ea2eafa9ad
Conflicts:
    usr/src/uts/common/fs/proc/prcontrol.c
    usr/src/uts/intel/os/archdep.c
    usr/src/uts/intel/sys/ucontext.h
    usr/src/uts/intel/syscall/getcontext.c
    
      
        | Split | 
	Close | 
      
      | Expand all | 
      | Collapse all | 
    
    
          --- old/usr/src/uts/intel/sys/x86_archext.h
          +++ new/usr/src/uts/intel/sys/x86_archext.h
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2011 by Delphix. All rights reserved.
  24   24   */
  25   25  /*
  26   26   * Copyright (c) 2010, Intel Corporation.
  27   27   * All rights reserved.
  28   28   */
  29   29  /*
  30   30   * Copyright 2020 Joyent, Inc.
  31   31   * Copyright 2012 Jens Elkner <jel+illumos@cs.uni-magdeburg.de>
  32   32   * Copyright 2012 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
  33   33   * Copyright 2014 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  34   34   * Copyright 2018 Nexenta Systems, Inc.
  35   35   * Copyright 2023 Oxide Computer Company
  36   36   */
  37   37  
  38   38  #ifndef _SYS_X86_ARCHEXT_H
  39   39  #define _SYS_X86_ARCHEXT_H
  40   40  
  41   41  #if !defined(_ASM)
  42   42  #include <sys/bitext.h>
  43   43  #include <sys/regset.h>
  44   44  #include <sys/processor.h>
  45   45  #include <vm/seg_enum.h>
  46   46  #include <vm/page.h>
  47   47  #endif  /* _ASM */
  48   48  
  49   49  #ifdef  __cplusplus
  50   50  extern "C" {
  51   51  #endif
  52   52  
  53   53  /*
  54   54   * cpuid instruction feature flags in %edx (standard function 1)
  55   55   */
  56   56  
  57   57  #define CPUID_INTC_EDX_FPU      0x00000001      /* x87 fpu present */
  58   58  #define CPUID_INTC_EDX_VME      0x00000002      /* virtual-8086 extension */
  59   59  #define CPUID_INTC_EDX_DE       0x00000004      /* debugging extensions */
  60   60  #define CPUID_INTC_EDX_PSE      0x00000008      /* page size extension */
  61   61  #define CPUID_INTC_EDX_TSC      0x00000010      /* time stamp counter */
  62   62  #define CPUID_INTC_EDX_MSR      0x00000020      /* rdmsr and wrmsr */
  63   63  #define CPUID_INTC_EDX_PAE      0x00000040      /* physical addr extension */
  64   64  #define CPUID_INTC_EDX_MCE      0x00000080      /* machine check exception */
  65   65  #define CPUID_INTC_EDX_CX8      0x00000100      /* cmpxchg8b instruction */
  66   66  #define CPUID_INTC_EDX_APIC     0x00000200      /* local APIC */
  67   67                                                  /* 0x400 - reserved */
  68   68  #define CPUID_INTC_EDX_SEP      0x00000800      /* sysenter and sysexit */
  69   69  #define CPUID_INTC_EDX_MTRR     0x00001000      /* memory type range reg */
  70   70  #define CPUID_INTC_EDX_PGE      0x00002000      /* page global enable */
  71   71  #define CPUID_INTC_EDX_MCA      0x00004000      /* machine check arch */
  72   72  #define CPUID_INTC_EDX_CMOV     0x00008000      /* conditional move insns */
  73   73  #define CPUID_INTC_EDX_PAT      0x00010000      /* page attribute table */
  74   74  #define CPUID_INTC_EDX_PSE36    0x00020000      /* 36-bit pagesize extension */
  75   75  #define CPUID_INTC_EDX_PSN      0x00040000      /* processor serial number */
  76   76  #define CPUID_INTC_EDX_CLFSH    0x00080000      /* clflush instruction */
  77   77                                                  /* 0x100000 - reserved */
  78   78  #define CPUID_INTC_EDX_DS       0x00200000      /* debug store exists */
  79   79  #define CPUID_INTC_EDX_ACPI     0x00400000      /* monitoring + clock ctrl */
  80   80  #define CPUID_INTC_EDX_MMX      0x00800000      /* MMX instructions */
  81   81  #define CPUID_INTC_EDX_FXSR     0x01000000      /* fxsave and fxrstor */
  82   82  #define CPUID_INTC_EDX_SSE      0x02000000      /* streaming SIMD extensions */
  83   83  #define CPUID_INTC_EDX_SSE2     0x04000000      /* SSE extensions */
  84   84  #define CPUID_INTC_EDX_SS       0x08000000      /* self-snoop */
  85   85  #define CPUID_INTC_EDX_HTT      0x10000000      /* Hyper Thread Technology */
  86   86  #define CPUID_INTC_EDX_TM       0x20000000      /* thermal monitoring */
  87   87  #define CPUID_INTC_EDX_IA64     0x40000000      /* Itanium emulating IA32 */
  88   88  #define CPUID_INTC_EDX_PBE      0x80000000      /* Pending Break Enable */
  89   89  
  90   90  /*
  91   91   * cpuid instruction feature flags in %ecx (standard function 1)
  92   92   */
  93   93  
  94   94  #define CPUID_INTC_ECX_SSE3     0x00000001      /* Yet more SSE extensions */
  95   95  #define CPUID_INTC_ECX_PCLMULQDQ 0x00000002     /* PCLMULQDQ insn */
  96   96  #define CPUID_INTC_ECX_DTES64   0x00000004      /* 64-bit DS area */
  97   97  #define CPUID_INTC_ECX_MON      0x00000008      /* MONITOR/MWAIT */
  98   98  #define CPUID_INTC_ECX_DSCPL    0x00000010      /* CPL-qualified debug store */
  99   99  #define CPUID_INTC_ECX_VMX      0x00000020      /* Hardware VM extensions */
 100  100  #define CPUID_INTC_ECX_SMX      0x00000040      /* Secure mode extensions */
 101  101  #define CPUID_INTC_ECX_EST      0x00000080      /* enhanced SpeedStep */
 102  102  #define CPUID_INTC_ECX_TM2      0x00000100      /* thermal monitoring */
 103  103  #define CPUID_INTC_ECX_SSSE3    0x00000200      /* Supplemental SSE3 insns */
 104  104  #define CPUID_INTC_ECX_CID      0x00000400      /* L1 context ID */
 105  105                                                  /* 0x00000800 - reserved */
 106  106  #define CPUID_INTC_ECX_FMA      0x00001000      /* Fused Multiply Add */
 107  107  #define CPUID_INTC_ECX_CX16     0x00002000      /* cmpxchg16 */
 108  108  #define CPUID_INTC_ECX_ETPRD    0x00004000      /* extended task pri messages */
 109  109  #define CPUID_INTC_ECX_PDCM     0x00008000      /* Perf/Debug Capability MSR */
 110  110                                                  /* 0x00010000 - reserved */
 111  111  #define CPUID_INTC_ECX_PCID     0x00020000      /* process-context ids */
 112  112  #define CPUID_INTC_ECX_DCA      0x00040000      /* direct cache access */
 113  113  #define CPUID_INTC_ECX_SSE4_1   0x00080000      /* SSE4.1 insns */
 114  114  #define CPUID_INTC_ECX_SSE4_2   0x00100000      /* SSE4.2 insns */
 115  115  #define CPUID_INTC_ECX_X2APIC   0x00200000      /* x2APIC */
 116  116  #define CPUID_INTC_ECX_MOVBE    0x00400000      /* MOVBE insn */
 117  117  #define CPUID_INTC_ECX_POPCNT   0x00800000      /* POPCNT insn */
 118  118  #define CPUID_INTC_ECX_TSCDL    0x01000000      /* Deadline TSC */
 119  119  #define CPUID_INTC_ECX_AES      0x02000000      /* AES insns */
 120  120  #define CPUID_INTC_ECX_XSAVE    0x04000000      /* XSAVE/XRESTOR insns */
 121  121  #define CPUID_INTC_ECX_OSXSAVE  0x08000000      /* OS supports XSAVE insns */
 122  122  #define CPUID_INTC_ECX_AVX      0x10000000      /* AVX supported */
 123  123  #define CPUID_INTC_ECX_F16C     0x20000000      /* F16C supported */
 124  124  #define CPUID_INTC_ECX_RDRAND   0x40000000      /* RDRAND supported */
 125  125  #define CPUID_INTC_ECX_HV       0x80000000      /* Hypervisor */
 126  126  
 127  127  /*
 128  128   * cpuid instruction feature flags in %edx (extended function 0x80000001)
 129  129   */
 130  130  
 131  131  #define CPUID_AMD_EDX_FPU       0x00000001      /* x87 fpu present */
 132  132  #define CPUID_AMD_EDX_VME       0x00000002      /* virtual-8086 extension */
 133  133  #define CPUID_AMD_EDX_DE        0x00000004      /* debugging extensions */
 134  134  #define CPUID_AMD_EDX_PSE       0x00000008      /* page size extensions */
 135  135  #define CPUID_AMD_EDX_TSC       0x00000010      /* time stamp counter */
 136  136  #define CPUID_AMD_EDX_MSR       0x00000020      /* rdmsr and wrmsr */
 137  137  #define CPUID_AMD_EDX_PAE       0x00000040      /* physical addr extension */
 138  138  #define CPUID_AMD_EDX_MCE       0x00000080      /* machine check exception */
 139  139  #define CPUID_AMD_EDX_CX8       0x00000100      /* cmpxchg8b instruction */
 140  140  #define CPUID_AMD_EDX_APIC      0x00000200      /* local APIC */
 141  141                                                  /* 0x00000400 - sysc on K6m6 */
 142  142  #define CPUID_AMD_EDX_SYSC      0x00000800      /* AMD: syscall and sysret */
 143  143  #define CPUID_AMD_EDX_MTRR      0x00001000      /* memory type and range reg */
 144  144  #define CPUID_AMD_EDX_PGE       0x00002000      /* page global enable */
 145  145  #define CPUID_AMD_EDX_MCA       0x00004000      /* machine check arch */
 146  146  #define CPUID_AMD_EDX_CMOV      0x00008000      /* conditional move insns */
 147  147  #define CPUID_AMD_EDX_PAT       0x00010000      /* K7: page attribute table */
 148  148  #define CPUID_AMD_EDX_FCMOV     0x00010000      /* FCMOVcc etc. */
 149  149  #define CPUID_AMD_EDX_PSE36     0x00020000      /* 36-bit pagesize extension */
 150  150                                  /* 0x00040000 - reserved */
 151  151                                  /* 0x00080000 - reserved */
 152  152  #define CPUID_AMD_EDX_NX        0x00100000      /* AMD: no-execute page prot */
 153  153                                  /* 0x00200000 - reserved */
 154  154  #define CPUID_AMD_EDX_MMXamd    0x00400000      /* AMD: MMX extensions */
 155  155  #define CPUID_AMD_EDX_MMX       0x00800000      /* MMX instructions */
 156  156  #define CPUID_AMD_EDX_FXSR      0x01000000      /* fxsave and fxrstor */
 157  157  #define CPUID_AMD_EDX_FFXSR     0x02000000      /* fast fxsave/fxrstor */
 158  158  #define CPUID_AMD_EDX_1GPG      0x04000000      /* 1GB page */
 159  159  #define CPUID_AMD_EDX_TSCP      0x08000000      /* rdtscp instruction */
 160  160                                  /* 0x10000000 - reserved */
 161  161  #define CPUID_AMD_EDX_LM        0x20000000      /* AMD: long mode */
 162  162  #define CPUID_AMD_EDX_3DNowx    0x40000000      /* AMD: extensions to 3DNow! */
 163  163  #define CPUID_AMD_EDX_3DNow     0x80000000      /* AMD: 3DNow! instructions */
 164  164  
 165  165  /*
 166  166   * AMD extended function 0x80000001 %ecx
 167  167   */
 168  168  
 169  169  #define CPUID_AMD_ECX_AHF64     0x00000001      /* LAHF and SAHF in long mode */
 170  170  #define CPUID_AMD_ECX_CMP_LGCY  0x00000002      /* AMD: multicore chip */
 171  171  #define CPUID_AMD_ECX_SVM       0x00000004      /* AMD: secure VM */
 172  172  #define CPUID_AMD_ECX_EAS       0x00000008      /* extended apic space */
 173  173  #define CPUID_AMD_ECX_CR8D      0x00000010      /* AMD: 32-bit mov %cr8 */
 174  174  #define CPUID_AMD_ECX_LZCNT     0x00000020      /* AMD: LZCNT insn */
 175  175  #define CPUID_AMD_ECX_SSE4A     0x00000040      /* AMD: SSE4A insns */
 176  176  #define CPUID_AMD_ECX_MAS       0x00000080      /* AMD: MisAlignSse mnode */
  177  177  #define CPUID_AMD_ECX_3DNP      0x00000100      /* AMD: 3DNowPrefetch */
 178  178  #define CPUID_AMD_ECX_OSVW      0x00000200      /* AMD: OSVW */
 179  179  #define CPUID_AMD_ECX_IBS       0x00000400      /* AMD: IBS */
 180  180  #define CPUID_AMD_ECX_XOP       0x00000800      /* AMD: Extended Operation */
 181  181  #define CPUID_AMD_ECX_SKINIT    0x00001000      /* AMD: SKINIT */
 182  182  #define CPUID_AMD_ECX_WDT       0x00002000      /* AMD: WDT */
 183  183                                  /* 0x00004000 - reserved */
 184  184  #define CPUID_AMD_ECX_LWP       0x00008000      /* AMD: Lightweight profiling */
 185  185  #define CPUID_AMD_ECX_FMA4      0x00010000      /* AMD: 4-operand FMA support */
 186  186                                  /* 0x00020000 - reserved */
 187  187                                  /* 0x00040000 - reserved */
 188  188  #define CPUID_AMD_ECX_NIDMSR    0x00080000      /* AMD: Node ID MSR */
 189  189                                  /* 0x00100000 - reserved */
 190  190  #define CPUID_AMD_ECX_TBM       0x00200000      /* AMD: trailing bit manips. */
 191  191  #define CPUID_AMD_ECX_TOPOEXT   0x00400000      /* AMD: Topology Extensions */
 192  192  #define CPUID_AMD_ECX_PCEC      0x00800000      /* AMD: Core ext perf counter */
 193  193  #define CUPID_AMD_ECX_PCENB     0x01000000      /* AMD: NB ext perf counter */
 194  194                                  /* 0x02000000 - reserved */
  195  195  #define CPUID_AMD_ECX_DBKP      0x04000000      /* AMD: Data breakpoint */
 196  196  #define CPUID_AMD_ECX_PERFTSC   0x08000000      /* AMD: TSC Perf Counter */
 197  197  #define CPUID_AMD_ECX_PERFL3    0x10000000      /* AMD: L3 Perf Counter */
 198  198  #define CPUID_AMD_ECX_MONITORX  0x20000000      /* AMD: clzero */
 199  199                                  /* 0x40000000 - reserved */
 200  200                                  /* 0x80000000 - reserved */
 201  201  
 202  202  /*
 203  203   * AMD uses %ebx for some of their features (extended function 0x80000008).
 204  204   */
 205  205  #define CPUID_AMD_EBX_CLZERO            0x000000001 /* AMD: CLZERO instr */
 206  206  #define CPUID_AMD_EBX_IRCMSR            0x000000002 /* AMD: Ret. instrs MSR */
 207  207  #define CPUID_AMD_EBX_ERR_PTR_ZERO      0x000000004 /* AMD: FP Err. Ptr. Zero */
 208  208  #define CPUID_AMD_EBX_IBPB              0x000001000 /* AMD: IBPB */
 209  209  #define CPUID_AMD_EBX_IBRS              0x000004000 /* AMD: IBRS */
 210  210  #define CPUID_AMD_EBX_STIBP             0x000008000 /* AMD: STIBP */
 211  211  #define CPUID_AMD_EBX_IBRS_ALL          0x000010000 /* AMD: Enhanced IBRS */
 212  212  #define CPUID_AMD_EBX_STIBP_ALL         0x000020000 /* AMD: STIBP ALL */
 213  213  #define CPUID_AMD_EBX_PREFER_IBRS       0x000040000 /* AMD: Don't retpoline */
 214  214  #define CPUID_AMD_EBX_PPIN              0x000800000 /* AMD: PPIN Support */
 215  215  #define CPUID_AMD_EBX_SSBD              0x001000000 /* AMD: SSBD */
 216  216  #define CPUID_AMD_EBX_VIRT_SSBD         0x002000000 /* AMD: VIRT SSBD */
 217  217  #define CPUID_AMD_EBX_SSB_NO            0x004000000 /* AMD: SSB Fixed */
 218  218  
 219  219  /*
 220  220   * AMD SVM features (extended function 0x8000000A).
 221  221   */
 222  222  #define CPUID_AMD_EDX_NESTED_PAGING     0x000000001 /* AMD: SVM NP */
 223  223  #define CPUID_AMD_EDX_LBR_VIRT          0x000000002 /* AMD: LBR virt. */
 224  224  #define CPUID_AMD_EDX_SVML              0x000000004 /* AMD: SVM lock */
 225  225  #define CPUID_AMD_EDX_NRIPS             0x000000008 /* AMD: NRIP save */
 226  226  #define CPUID_AMD_EDX_TSC_RATE_MSR      0x000000010 /* AMD: MSR TSC ctrl */
 227  227  #define CPUID_AMD_EDX_VMCB_CLEAN        0x000000020 /* AMD: VMCB clean bits */
 228  228  #define CPUID_AMD_EDX_FLUSH_ASID        0x000000040 /* AMD: flush by ASID */
 229  229  #define CPUID_AMD_EDX_DECODE_ASSISTS    0x000000080 /* AMD: decode assists */
 230  230  
 231  231  /*
 232  232   * AMD Encrypted Memory Capabilities -- 0x8000_001F
 233  233   *
 234  234   * %ecx is the number of encrypted guests.
 235  235   * %edx is the minimum ASID value for SEV enabled, SEV-ES disabled guests
 236  236   */
 237  237  #define CPUID_AMD_8X1F_EAX_NVS          (1 << 29) /* VIRT_RMPUPDATE MSR */
 238  238  #define CPUID_AMD_8X1F_EAX_SCP          (1 << 28) /* SVSM Comm Page MSR */
 239  239  #define CPUID_AMD_8X1F_EAX_SMT_PROT     (1 << 25) /* SMT Protection */
 240  240  #define CPUID_AMD_8X1F_EAX_VMSAR_PROT   (1 << 24) /* VMSA Reg Protection */
 241  241  #define CPUID_AMD_8X1F_EAX_IBSVGC       (1 << 19) /* IBS Virt. for SEV-ES */
 242  242  #define CPUID_AMD_8X1F_EAX_VIRT_TOM     (1 << 18) /* Virt TOM MSR */
 243  243  #define CPUID_AMD_8X1F_EAX_VMGEXIT      (1 << 17) /* VMGEXIT Parameter */
 244  244  #define CPUID_AMD_8X1F_EAX_VTE          (1 << 16) /* Virt Transparent Enc. */
 245  245  #define CPUID_AMD_8X1F_EAX_NO_IBS       (1 << 15) /* No IBS by host */
 246  246  #define CPUID_AMD_8X1F_EAX_DBGSWP       (1 << 14) /* Debug state for SEV-ES */
 247  247  #define CPUID_AMD_8X1F_EAX_ALT_INJ      (1 << 13) /* Alternate Injection */
 248  248  #define CPUID_AMD_8X1F_EAX_RES_INJ      (1 << 12) /* Restricted Injection */
 249  249  #define CPUID_AMD_8X1F_EAX_64B_HOST     (1 << 11) /* SEV requires amd64 */
 250  250  #define CPUID_AMD_8X1F_EAX_HWECC        (1 << 10) /* HW cache coherency req */
 251  251  #define CPUID_AMD_8X1F_EAX_TSC_AUX      (1 << 9) /* TSC AUX Virtualization */
 252  252  #define CPUID_AMD_8X1F_EAX_SEC_TSC      (1 << 8) /* Secure TSC */
 253  253  #define CPUID_AMD_8X1F_EAX_VSSS         (1 << 7) /* VMPL Super. Shadow Stack */
 254  254  #define CPUID_AMD_8X1F_EAX_RMPQUERY     (1 << 6) /* RMPQUERY Instr */
 255  255  #define CPUID_AMD_8X1F_EAX_VMPL         (1 << 5) /* VM Permission Levels */
 256  256  #define CPUID_AMD_8X1F_EAX_SEV_SNP      (1 << 4) /* SEV Secure Nested Paging */
 257  257  #define CPUID_AMD_8X1F_EAX_SEV_ES       (1 << 3) /* SEV Encrypted State */
 258  258  #define CPUID_AMD_8X1F_EAX_PAGE_FLUSH   (1 << 2) /* Page Flush MSR */
 259  259  #define CPUID_AMD_8X1F_EAX_SEV          (1 << 1) /* Secure Encrypted Virt. */
 260  260  #define CPUID_AMD_8X1F_EAX_SME          (1 << 0) /* Secure Memory Encrypt. */
 261  261  
 262  262  #define CPUID_AMD_8X1F_EBX_NVMPL(r)     bitx32(r, 15, 12) /* num VM Perm lvl */
 263  263  #define CPUID_AMD_8X1F_EBX_PAR(r)       bitx32(r, 11, 6) /* paddr bit rem */
 264  264  #define CPUID_AMD_8X1F_EBX_CBIT(r)      bitx32(r, 5, 0) /* C-bit loc in PTE */
 265  265  
 266  266  /*
 267  267   * AMD Platform QoS Extended Features -- 0x8000_0020
 268  268   */
 269  269  #define CPUID_AMD_8X20_EBX_L3RR         (1 << 4) /* L3 Range Reservations */
 270  270  
 271  271  /*
 272  272   * AMD Extended Feature 2 -- 0x8000_0021
 273  273   */
 274  274  #define CPUID_AMD_8X21_EAX_CPUID_DIS    (1 << 17) /* CPUID dis for CPL > 0 */
 275  275  #define CPUID_AMD_8X21_EAX_PREFETCH     (1 << 13) /* Prefetch control MSR  */
 276  276  #define CPUID_AMD_8X21_EAX_NO_SMMCTL    (1 << 9) /* No SMM_CTL MSR */
 277  277  #define CPUID_AMD_8X21_EAX_AIBRS        (1 << 8) /* Automatic IBRS */
 278  278  #define CPUID_AMD_8X21_EAX_UAI          (1 << 7) /* Upper Address Ignore */
 279  279  #define CPUID_AMD_8X21_EAX_SMM_PGLK     (1 << 3) /* SMM Page config lock */
 280  280  #define CPUID_AMD_8X21_EAX_LFENCE_SER   (1 << 2) /* LFENCE is dispatch serial */
 281  281  #define CPUID_AMD_8X21_EAX_NO_NDBP      (1 << 0) /* No nested data #BP */
 282  282  
  283  283  #define CPUID_AMD_8X21_EBX_MPS(r)       bitx32(r, 11, 0) /* MCU Patch size x 16B */
 284  284  
 285  285  /*
 286  286   * AMD Extended Performance Monitoring and Debug -- 0x8000_0022
 287  287   */
 288  288  #define CPUID_AMD_8X22_LBR_FRZ  (1 << 2)        /* Freeze PMC / LBR on ovflw */
 289  289  #define CPUID_AMD_8X22_LBR_STK  (1 << 1)        /* Last Branch Record Stack */
 290  290  #define CPUID_AMD_8X22_EAX_PMV2 (1 << 0)        /* Perfmon v2 */
 291  291  
 292  292  #define CPUID_AMD_8X22_EBX_NPMC_NB(r)   bitx32(r, 15, 10) /* # NB PMC */
 293  293  #define CPUID_AMD_8X22_EBX_LBR_SZ(r)    bitx32(r, 9, 4) /* # LBR Stack ents. */
 294  294  #define CPUID_AMD_8X22_EBX_NPMC_CORE(r) bitx32(r, 3, 0) /* # core PMC */
 295  295  
 296  296  /*
  297  297   * AMD Secure Multi-key Encryption -- 0x8000_0023
 298  298   */
 299  299  #define CPUID_AMD_8X23_EAX_MEMHMK       (1 << 0) /* Secure Host Multi-Key Mem */
 300  300  
 301  301  #define CPUID_AMD_8x23_EBX_MAX_HMK(r)   bitx32(r, 15, 0) /* Max HMK IDs */
 302  302  
 303  303  /*
 304  304   * Intel now seems to have claimed part of the "extended" function
  305  305   * space that we previously reserved for non-Intel implementors to use.
 306  306   * More excitingly still, they've claimed bit 20 to mean LAHF/SAHF
 307  307   * is available in long mode i.e. what AMD indicate using bit 0.
 308  308   * On the other hand, everything else is labelled as reserved.
 309  309   */
 310  310  #define CPUID_INTC_ECX_AHF64    0x00100000      /* LAHF and SAHF in long mode */
 311  311  
 312  312  /*
 313  313   * Intel uses cpuid leaf 6 to cover various thermal and power control
 314  314   * operations.
 315  315   */
 316  316  #define CPUID_INTC_EAX_DTS      0x00000001      /* Digital Thermal Sensor */
 317  317  #define CPUID_INTC_EAX_TURBO    0x00000002      /* Turboboost */
 318  318  #define CPUID_INTC_EAX_ARAT     0x00000004      /* APIC-Timer-Always-Running */
 319  319  /* bit 3 is reserved */
 320  320  #define CPUID_INTC_EAX_PLN      0x00000010      /* Power limit notification */
 321  321  #define CPUID_INTC_EAX_ECMD     0x00000020      /* Clock mod. duty cycle */
 322  322  #define CPUID_INTC_EAX_PTM      0x00000040      /* Package thermal management */
 323  323  #define CPUID_INTC_EAX_HWP      0x00000080      /* HWP base registers */
 324  324  #define CPUID_INTC_EAX_HWP_NOT  0x00000100      /* HWP Notification */
 325  325  #define CPUID_INTC_EAX_HWP_ACT  0x00000200      /* HWP Activity Window */
 326  326  #define CPUID_INTC_EAX_HWP_EPR  0x00000400      /* HWP Energy Perf. Pref. */
 327  327  #define CPUID_INTC_EAX_HWP_PLR  0x00000800      /* HWP Package Level Request */
 328  328  /* bit 12 is reserved */
 329  329  #define CPUID_INTC_EAX_HDC      0x00002000      /* HDC */
 330  330  #define CPUID_INTC_EAX_TURBO3   0x00004000      /* Turbo Boost Max Tech 3.0 */
 331  331  #define CPUID_INTC_EAX_HWP_CAP  0x00008000      /* HWP Capabilities */
 332  332  #define CPUID_INTC_EAX_HWP_PECI 0x00010000      /* HWP PECI override */
 333  333  #define CPUID_INTC_EAX_HWP_FLEX 0x00020000      /* Flexible HWP */
 334  334  #define CPUID_INTC_EAX_HWP_FAST 0x00040000      /* Fast IA32_HWP_REQUEST */
 335  335  /* bit 19 is reserved */
 336  336  #define CPUID_INTC_EAX_HWP_IDLE 0x00100000      /* Ignore Idle Logical HWP */
 337  337  
 338  338  #define CPUID_INTC_EBX_DTS_NTRESH(x)    ((x) & 0xf)
 339  339  
 340  340  #define CPUID_INTC_ECX_MAPERF   0x00000001      /* IA32_MPERF / IA32_APERF */
 341  341  /* bits 1-2 are reserved */
 342  342  #define CPUID_INTC_ECX_PERFBIAS 0x00000008      /* IA32_ENERGY_PERF_BIAS */
 343  343  
 344  344  /*
 345  345   * Intel also uses cpuid leaf 7 to have additional instructions and features.
 346  346   * Like some other leaves, but unlike the current ones we care about, it
 347  347   * requires us to specify both a leaf in %eax and a sub-leaf in %ecx. To deal
 348  348   * with the potential use of additional sub-leaves in the future, we now
 349  349   * specifically label the EBX features with their leaf and sub-leaf.
 350  350   */
 351  351  #define CPUID_INTC_EBX_7_0_FSGSBASE     0x00000001      /* FSGSBASE */
 352  352  #define CPUID_INTC_EBX_7_0_TSC_ADJ      0x00000002      /* TSC adjust MSR */
 353  353  #define CPUID_INTC_EBX_7_0_SGX          0x00000004      /* SGX */
 354  354  #define CPUID_INTC_EBX_7_0_BMI1         0x00000008      /* BMI1 instrs */
 355  355  #define CPUID_INTC_EBX_7_0_HLE          0x00000010      /* HLE */
 356  356  #define CPUID_INTC_EBX_7_0_AVX2         0x00000020      /* AVX2 supported */
 357  357  #define CPUID_INTC_EBX_7_0_FDP_EXCPN    0x00000040      /* FDP on exception */
 358  358  #define CPUID_INTC_EBX_7_0_SMEP         0x00000080      /* SMEP in CR4 */
 359  359  #define CPUID_INTC_EBX_7_0_BMI2         0x00000100      /* BMI2 instrs */
 360  360  #define CPUID_INTC_EBX_7_0_ENH_REP_MOV  0x00000200      /* Enhanced REP MOVSB */
 361  361  #define CPUID_INTC_EBX_7_0_INVPCID      0x00000400      /* invpcid instr */
 362  362  #define CPUID_INTC_EBX_7_0_RTM          0x00000800      /* RTM instrs */
 363  363  #define CPUID_INTC_EBX_7_0_PQM          0x00001000      /* QoS Monitoring */
 364  364  #define CPUID_INTC_EBX_7_0_DEP_CSDS     0x00002000      /* Deprecates CS/DS */
 365  365  #define CPUID_INTC_EBX_7_0_MPX          0x00004000      /* Mem. Prot. Ext. */
  366  366  #define CPUID_INTC_EBX_7_0_PQE          0x00008000      /* QoS Enforcement */
 367  367  #define CPUID_INTC_EBX_7_0_AVX512F      0x00010000      /* AVX512 foundation */
 368  368  #define CPUID_INTC_EBX_7_0_AVX512DQ     0x00020000      /* AVX512DQ */
 369  369  #define CPUID_INTC_EBX_7_0_RDSEED       0x00040000      /* RDSEED instr */
 370  370  #define CPUID_INTC_EBX_7_0_ADX          0x00080000      /* ADX instrs */
 371  371  #define CPUID_INTC_EBX_7_0_SMAP         0x00100000      /* SMAP in CR 4 */
 372  372  #define CPUID_INTC_EBX_7_0_AVX512IFMA   0x00200000      /* AVX512IFMA */
 373  373  /* Bit 22 is reserved */
  374  374  #define CPUID_INTC_EBX_7_0_CLFLUSHOPT   0x00800000      /* CLFLUSHOPT */
 375  375  #define CPUID_INTC_EBX_7_0_CLWB         0x01000000      /* CLWB */
 376  376  #define CPUID_INTC_EBX_7_0_PTRACE       0x02000000      /* Processor Trace */
 377  377  #define CPUID_INTC_EBX_7_0_AVX512PF     0x04000000      /* AVX512PF */
 378  378  #define CPUID_INTC_EBX_7_0_AVX512ER     0x08000000      /* AVX512ER */
 379  379  #define CPUID_INTC_EBX_7_0_AVX512CD     0x10000000      /* AVX512CD */
 380  380  #define CPUID_INTC_EBX_7_0_SHA          0x20000000      /* SHA extensions */
 381  381  #define CPUID_INTC_EBX_7_0_AVX512BW     0x40000000      /* AVX512BW */
 382  382  #define CPUID_INTC_EBX_7_0_AVX512VL     0x80000000      /* AVX512VL */
 383  383  
 384  384  #define CPUID_INTC_EBX_7_0_ALL_AVX512 \
 385  385          (CPUID_INTC_EBX_7_0_AVX512F | CPUID_INTC_EBX_7_0_AVX512DQ | \
 386  386          CPUID_INTC_EBX_7_0_AVX512IFMA | CPUID_INTC_EBX_7_0_AVX512PF | \
 387  387          CPUID_INTC_EBX_7_0_AVX512ER | CPUID_INTC_EBX_7_0_AVX512CD | \
 388  388          CPUID_INTC_EBX_7_0_AVX512BW | CPUID_INTC_EBX_7_0_AVX512VL)
 389  389  
 390  390  #define CPUID_INTC_ECX_7_0_PREFETCHWT1  0x00000001      /* PREFETCHWT1 */
 391  391  #define CPUID_INTC_ECX_7_0_AVX512VBMI   0x00000002      /* AVX512VBMI */
 392  392  #define CPUID_INTC_ECX_7_0_UMIP         0x00000004      /* UMIP */
 393  393  #define CPUID_INTC_ECX_7_0_PKU          0x00000008      /* umode prot. keys */
 394  394  #define CPUID_INTC_ECX_7_0_OSPKE        0x00000010      /* OSPKE */
 395  395  #define CPUID_INTC_ECX_7_0_WAITPKG      0x00000020      /* WAITPKG */
 396  396  #define CPUID_INTC_ECX_7_0_AVX512VBMI2  0x00000040      /* AVX512 VBMI2 */
 397  397  #define CPUID_INTC_ECX_7_0_CET_SS       0x00000080      /* CET Shadow Stack */
 398  398  #define CPUID_INTC_ECX_7_0_GFNI         0x00000100      /* GFNI */
 399  399  #define CPUID_INTC_ECX_7_0_VAES         0x00000200      /* VAES */
 400  400  #define CPUID_INTC_ECX_7_0_VPCLMULQDQ   0x00000400      /* VPCLMULQDQ */
 401  401  #define CPUID_INTC_ECX_7_0_AVX512VNNI   0x00000800      /* AVX512 VNNI */
 402  402  #define CPUID_INTC_ECX_7_0_AVX512BITALG 0x00001000      /* AVX512 BITALG */
 403  403  #define CPUID_INTC_ECX_7_0_TME_EN       0x00002000      /* Total Memory Encr. */
 404  404  #define CPUID_INTC_ECX_7_0_AVX512VPOPCDQ 0x00004000     /* AVX512 VPOPCNTDQ */
 405  405  /* bit 15 is reserved */
 406  406  #define CPUID_INTC_ECX_7_0_LA57         0x00010000      /* 57-bit paging */
 407  407  /* bits 17-21 are the value of MAWAU */
 408  408  #define CPUID_INTC_ECX_7_0_RDPID        0x00400000      /* RPID, IA32_TSC_AUX */
 409  409  #define CPUID_INTC_ECX_7_0_KLSUP        0x00800000      /* Key Locker */
 410  410  /* bit 24 is reserved */
 411  411  #define CPUID_INTC_ECX_7_0_CLDEMOTE     0x02000000      /* Cache line demote */
  412  412  /* bit 26 is reserved */
 413  413  #define CPUID_INTC_ECX_7_0_MOVDIRI      0x08000000      /* MOVDIRI insn */
 414  414  #define CPUID_INTC_ECX_7_0_MOVDIR64B    0x10000000      /* MOVDIR64B insn */
 415  415  #define CPUID_INTC_ECX_7_0_ENQCMD       0x20000000      /* Enqueue Stores */
 416  416  #define CPUID_INTC_ECX_7_0_SGXLC        0x40000000      /* SGX Launch config */
 417  417  #define CPUID_INTC_ECX_7_0_PKS          0x80000000      /* protection keys */
 418  418  
 419  419  /*
 420  420   * While CPUID_INTC_ECX_7_0_GFNI, CPUID_INTC_ECX_7_0_VAES, and
 421  421   * CPUID_INTC_ECX_7_0_VPCLMULQDQ all have AVX512 components, they are still
 422  422   * valid when AVX512 is not. However, the following flags all are only valid
 423  423   * when AVX512 is present.
 424  424   */
 425  425  #define CPUID_INTC_ECX_7_0_ALL_AVX512 \
 426  426          (CPUID_INTC_ECX_7_0_AVX512VBMI | CPUID_INTC_ECX_7_0_AVX512VNNI | \
 427  427          CPUID_INTC_ECX_7_0_AVX512BITALG | CPUID_INTC_ECX_7_0_AVX512VPOPCDQ)
 428  428  
 429  429  /* bits 0-1 are reserved */
 430  430  #define CPUID_INTC_EDX_7_0_AVX5124NNIW  0x00000004      /* AVX512 4NNIW */
 431  431  #define CPUID_INTC_EDX_7_0_AVX5124FMAPS 0x00000008      /* AVX512 4FMAPS */
 432  432  #define CPUID_INTC_EDX_7_0_FSREPMOV     0x00000010      /* fast short rep mov */
 433  433  #define CPUID_INTC_EDX_7_0_UINTR        0x00000020      /* user interrupts */
 434  434  /* bits 6-7 are reserved */
 435  435  #define CPUID_INTC_EDX_7_0_AVX512VP2INT 0x00000100      /* VP2INTERSECT */
 436  436  /* bit 9 is reserved */
 437  437  #define CPUID_INTC_EDX_7_0_MD_CLEAR     0x00000400      /* MB VERW */
 438  438  /* bits 11-13 are reserved */
 439  439  #define CPUID_INTC_EDX_7_0_SERIALIZE    0x00004000      /* Serialize instr */
 440  440  #define CPUID_INTC_EDX_7_0_HYBRID       0x00008000      /* Hybrid CPU */
 441  441  #define CPUID_INTC_EDX_7_0_TSXLDTRK     0x00010000      /* TSX load track */
 442  442  /* bit 17 is reserved */
 443  443  #define CPUID_INTC_EDX_7_0_PCONFIG      0x00040000      /* PCONFIG */
 444  444  /* bit 19 is reserved */
 445  445  #define CPUID_INTC_EDX_7_0_CET_IBT      0x00100000      /* CET ind. branch */
 446  446  /* bit 21 is reserved */
 447  447  #define CPUID_INTC_EDX_7_0_AMX_BF16     0x00400000      /* Tile F16 */
 448  448  #define CPUID_INTC_EDX_7_0_AVX512FP16   0x00800000      /* AVX512 FP16 */
 449  449  #define CPUID_INTC_EDX_7_0_AMX_TILE     0x01000000      /* Tile arch */
 450  450  #define CPUID_INTC_EDX_7_0_AMX_INT8     0x02000000      /* Tile INT8 */
 451  451  #define CPUID_INTC_EDX_7_0_SPEC_CTRL    0x04000000      /* Spec, IBPB, IBRS */
 452  452  #define CPUID_INTC_EDX_7_0_STIBP        0x08000000      /* STIBP */
 453  453  #define CPUID_INTC_EDX_7_0_FLUSH_CMD    0x10000000      /* IA32_FLUSH_CMD */
 454  454  #define CPUID_INTC_EDX_7_0_ARCH_CAPS    0x20000000      /* IA32_ARCH_CAPS */
 455  455  #define CPUID_INTC_EDX_7_0_SSBD         0x80000000      /* SSBD */
 456  456  
 457  457  #define CPUID_INTC_EDX_7_0_ALL_AVX512 \
 458  458          (CPUID_INTC_EDX_7_0_AVX5124NNIW | CPUID_INTC_EDX_7_0_AVX5124FMAPS | \
 459  459          CPUID_INTC_EDX_7_0_AVX512VP2INT | CPUID_INTC_EDX_7_0_AVX512FP16)
 460  460  
 461  461  /* bits 0-3 are reserved */
 462  462  #define CPUID_INTC_EAX_7_1_AVXVNNI      0x00000010      /* VEX VNNI */
 463  463  #define CPUID_INTC_EAX_7_1_AVX512_BF16  0x00000020      /* AVX512 BF16 */
 464  464  /* bits 6-9 are reserved */
 465  465  #define CPUID_INTC_EAX_7_1_ZL_MOVSB     0x00000400      /* zero-length MOVSB */
 466  466  #define CPUID_INTC_EAX_7_1_FS_STOSB     0x00000800      /* fast short STOSB */
 467  467  #define CPUID_INTC_EAX_7_1_FS_CMPSB     0x00001000      /* fast CMPSB, SCASB */
 468  468  /* bits 13-21 are reserved */
 469  469  #define CPUID_INTC_EAX_7_1_HRESET       0x00400000      /* History Reset leaf */
 470  470  /* bits 23-25 are reserved */
 471  471  #define CPUID_INTC_EAX_7_1_LAM          0x02000000      /* Linear addr mask */
 472  472  /* bits 27-31 are reserved */
 473  473  
 474  474  /*
 475  475   * Intel also uses cpuid leaf 0xd to report additional instructions and features
 476  476   * when the sub-leaf in %ecx == 1. We label these using the same convention as
 477  477   * with leaf 7.
 478  478   */
 479  479  #define CPUID_INTC_EAX_D_1_XSAVEOPT     0x00000001      /* xsaveopt inst. */
 480  480  #define CPUID_INTC_EAX_D_1_XSAVEC       0x00000002      /* xsavec inst. */
 481  481  #define CPUID_INTC_EAX_D_1_XSAVES       0x00000008      /* xsaves inst. */
 482  482  
 483  483  #define REG_PAT                 0x277
 484  484  #define REG_TSC                 0x10    /* timestamp counter */
 485  485  #define REG_APIC_BASE_MSR       0x1b
 486  486  #define REG_X2APIC_BASE_MSR     0x800   /* The MSR address offset of x2APIC */
 487  487  
 488  488  #if !defined(__xpv)
 489  489  /*
 490  490   * AMD C1E
 491  491   */
 492  492  #define MSR_AMD_INT_PENDING_CMP_HALT    0xC0010055
 493  493  #define AMD_ACTONCMPHALT_SHIFT  27
 494  494  #define AMD_ACTONCMPHALT_MASK   3
 495  495  #endif
 496  496  
 497  497  #define MSR_DEBUGCTL            0x1d9
 498  498  
 499  499  #define DEBUGCTL_LBR            0x01
 500  500  #define DEBUGCTL_BTF            0x02
 501  501  
 502  502  /* Intel P6, AMD */
 503  503  #define MSR_LBR_FROM            0x1db
 504  504  #define MSR_LBR_TO              0x1dc
 505  505  #define MSR_LEX_FROM            0x1dd
 506  506  #define MSR_LEX_TO              0x1de
 507  507  
 508  508  /* Intel P4 (pre-Prescott, non P4 M) */
 509  509  #define MSR_P4_LBSTK_TOS        0x1da
 510  510  #define MSR_P4_LBSTK_0          0x1db
 511  511  #define MSR_P4_LBSTK_1          0x1dc
 512  512  #define MSR_P4_LBSTK_2          0x1dd
 513  513  #define MSR_P4_LBSTK_3          0x1de
 514  514  
 515  515  /* Intel Pentium M */
 516  516  #define MSR_P6M_LBSTK_TOS       0x1c9
 517  517  #define MSR_P6M_LBSTK_0         0x040
 518  518  #define MSR_P6M_LBSTK_1         0x041
 519  519  #define MSR_P6M_LBSTK_2         0x042
 520  520  #define MSR_P6M_LBSTK_3         0x043
 521  521  #define MSR_P6M_LBSTK_4         0x044
 522  522  #define MSR_P6M_LBSTK_5         0x045
 523  523  #define MSR_P6M_LBSTK_6         0x046
 524  524  #define MSR_P6M_LBSTK_7         0x047
 525  525  
 526  526  /* Intel P4 (Prescott) */
 527  527  #define MSR_PRP4_LBSTK_TOS      0x1da
 528  528  #define MSR_PRP4_LBSTK_FROM_0   0x680
 529  529  #define MSR_PRP4_LBSTK_FROM_1   0x681
 530  530  #define MSR_PRP4_LBSTK_FROM_2   0x682
 531  531  #define MSR_PRP4_LBSTK_FROM_3   0x683
 532  532  #define MSR_PRP4_LBSTK_FROM_4   0x684
 533  533  #define MSR_PRP4_LBSTK_FROM_5   0x685
 534  534  #define MSR_PRP4_LBSTK_FROM_6   0x686
 535  535  #define MSR_PRP4_LBSTK_FROM_7   0x687
 536  536  #define MSR_PRP4_LBSTK_FROM_8   0x688
 537  537  #define MSR_PRP4_LBSTK_FROM_9   0x689
 538  538  #define MSR_PRP4_LBSTK_FROM_10  0x68a
 539  539  #define MSR_PRP4_LBSTK_FROM_11  0x68b
 540  540  #define MSR_PRP4_LBSTK_FROM_12  0x68c
 541  541  #define MSR_PRP4_LBSTK_FROM_13  0x68d
 542  542  #define MSR_PRP4_LBSTK_FROM_14  0x68e
 543  543  #define MSR_PRP4_LBSTK_FROM_15  0x68f
 544  544  #define MSR_PRP4_LBSTK_TO_0     0x6c0
 545  545  #define MSR_PRP4_LBSTK_TO_1     0x6c1
 546  546  #define MSR_PRP4_LBSTK_TO_2     0x6c2
 547  547  #define MSR_PRP4_LBSTK_TO_3     0x6c3
 548  548  #define MSR_PRP4_LBSTK_TO_4     0x6c4
 549  549  #define MSR_PRP4_LBSTK_TO_5     0x6c5
 550  550  #define MSR_PRP4_LBSTK_TO_6     0x6c6
 551  551  #define MSR_PRP4_LBSTK_TO_7     0x6c7
 552  552  #define MSR_PRP4_LBSTK_TO_8     0x6c8
 553  553  #define MSR_PRP4_LBSTK_TO_9     0x6c9
 554  554  #define MSR_PRP4_LBSTK_TO_10    0x6ca
 555  555  #define MSR_PRP4_LBSTK_TO_11    0x6cb
 556  556  #define MSR_PRP4_LBSTK_TO_12    0x6cc
 557  557  #define MSR_PRP4_LBSTK_TO_13    0x6cd
 558  558  #define MSR_PRP4_LBSTK_TO_14    0x6ce
 559  559  #define MSR_PRP4_LBSTK_TO_15    0x6cf
 560  560  
 561  561  /*
 562  562   * PPIN definitions for Intel and AMD. Unfortunately, Intel and AMD use
 563  563   * different MSRs for this and different MSRs to control whether or not it
 564  564   * should be readable.
 565  565   */
 566  566  #define MSR_PPIN_CTL_INTC       0x04e
 567  567  #define MSR_PPIN_INTC           0x04f
 568  568  #define MSR_PLATFORM_INFO       0x0ce
 569  569  #define MSR_PLATFORM_INFO_PPIN  (1 << 23)
 570  570  
 571  571  #define MSR_PPIN_CTL_AMD        0xC00102F0
 572  572  #define MSR_PPIN_AMD            0xC00102F1
 573  573  
 574  574  /*
 575  575   * These values are currently the same between Intel and AMD.
 576  576   */
 577  577  #define MSR_PPIN_CTL_MASK       0x03
 578  578  #define MSR_PPIN_CTL_DISABLED   0x00
 579  579  #define MSR_PPIN_CTL_LOCKED     0x01
 580  580  #define MSR_PPIN_CTL_ENABLED    0x02
 581  581  
 582  582  /*
 583  583   * Intel IA32_ARCH_CAPABILITIES MSR.
 584  584   */
 585  585  #define MSR_IA32_ARCH_CAPABILITIES              0x10a
 586  586  #define IA32_ARCH_CAP_RDCL_NO                   0x0001
 587  587  #define IA32_ARCH_CAP_IBRS_ALL                  0x0002
 588  588  #define IA32_ARCH_CAP_RSBA                      0x0004
 589  589  #define IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY        0x0008
 590  590  #define IA32_ARCH_CAP_SSB_NO                    0x0010
 591  591  #define IA32_ARCH_CAP_MDS_NO                    0x0020
 592  592  #define IA32_ARCH_CAP_IF_PSCHANGE_MC_NO         0x0040
 593  593  #define IA32_ARCH_CAP_TSX_CTRL                  0x0080
 594  594  #define IA32_ARCH_CAP_TAA_NO                    0x0100
 595  595  
 596  596  /*
 597  597   * Intel Speculation related MSRs
 598  598   */
 599  599  #define MSR_IA32_SPEC_CTRL      0x48
 600  600  #define IA32_SPEC_CTRL_IBRS     0x01
 601  601  #define IA32_SPEC_CTRL_STIBP    0x02
 602  602  #define IA32_SPEC_CTRL_SSBD     0x04
 603  603  
 604  604  #define MSR_IA32_PRED_CMD       0x49
 605  605  #define IA32_PRED_CMD_IBPB      0x01
 606  606  
 607  607  #define MSR_IA32_FLUSH_CMD      0x10b
 608  608  #define IA32_FLUSH_CMD_L1D      0x01
 609  609  
 610  610  /*
 611  611   * Intel VMX related MSRs
 612  612   */
 613  613  #define MSR_IA32_FEAT_CTRL      0x03a
 614  614  #define IA32_FEAT_CTRL_LOCK     0x1
 615  615  #define IA32_FEAT_CTRL_SMX_EN   0x2
 616  616  #define IA32_FEAT_CTRL_VMX_EN   0x4
 617  617  
 618  618  #define MSR_IA32_VMX_BASIC              0x480
 619  619  #define IA32_VMX_BASIC_INS_OUTS         (1UL << 54)
 620  620  #define IA32_VMX_BASIC_TRUE_CTRLS       (1UL << 55)
 621  621  
 622  622  #define MSR_IA32_VMX_PROCBASED_CTLS             0x482
 623  623  #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS        0x48e
 624  624  #define IA32_VMX_PROCBASED_2ND_CTLS     (1UL << 31)
 625  625  
 626  626  #define MSR_IA32_VMX_PROCBASED2_CTLS    0x48b
 627  627  #define IA32_VMX_PROCBASED2_EPT         (1UL << 1)
 628  628  #define IA32_VMX_PROCBASED2_VPID        (1UL << 5)
 629  629  
 630  630  #define MSR_IA32_VMX_EPT_VPID_CAP       0x48c
 631  631  #define IA32_VMX_EPT_VPID_EXEC_ONLY             (1UL << 0)
 632  632  #define IA32_VMX_EPT_VPID_PWL4                  (1UL << 6)
 633  633  #define IA32_VMX_EPT_VPID_TYPE_UC               (1UL << 8)
 634  634  #define IA32_VMX_EPT_VPID_TYPE_WB               (1UL << 14)
 635  635  #define IA32_VMX_EPT_VPID_MAP_2M                (1UL << 16)
 636  636  #define IA32_VMX_EPT_VPID_MAP_1G                (1UL << 17)
 637  637  #define IA32_VMX_EPT_VPID_HW_AD                 (1UL << 21)
 638  638  #define IA32_VMX_EPT_VPID_INVEPT                (1UL << 20)
 639  639  #define IA32_VMX_EPT_VPID_INVEPT_SINGLE         (1UL << 25)
 640  640  #define IA32_VMX_EPT_VPID_INVEPT_ALL            (1UL << 26)
 641  641  #define IA32_VMX_EPT_VPID_INVVPID               (1UL << 32)
 642  642  #define IA32_VMX_EPT_VPID_INVVPID_ADDR          (1UL << 40)
 643  643  #define IA32_VMX_EPT_VPID_INVVPID_SINGLE        (1UL << 41)
 644  644  #define IA32_VMX_EPT_VPID_INVVPID_ALL           (1UL << 42)
 645  645  #define IA32_VMX_EPT_VPID_INVVPID_RETAIN        (1UL << 43)
 646  646  
 647  647  /*
 648  648   * Intel TSX Control MSRs
 649  649   */
 650  650  #define MSR_IA32_TSX_CTRL               0x122
 651  651  #define IA32_TSX_CTRL_RTM_DISABLE       0x01
 652  652  #define IA32_TSX_CTRL_CPUID_CLEAR       0x02
 653  653  
 654  654  /*
 655  655   * Intel Thermal MSRs
 656  656   */
 657  657  #define MSR_IA32_THERM_INTERRUPT        0x19b
 658  658  #define IA32_THERM_INTERRUPT_HIGH_IE    0x00000001
 659  659  #define IA32_THERM_INTERRUPT_LOW_IE     0x00000002
 660  660  #define IA32_THERM_INTERRUPT_PROCHOT_IE 0x00000004
 661  661  #define IA32_THERM_INTERRUPT_FORCEPR_IE 0x00000008
 662  662  #define IA32_THERM_INTERRUPT_CRIT_IE    0x00000010
 663  663  #define IA32_THERM_INTERRUPT_TR1_VAL(x) (((x) >> 8) & 0x7f)
 664  664  #define IA32_THERM_INTTERUPT_TR1_IE     0x00008000
 665  665  #define IA32_THERM_INTTERUPT_TR2_VAL(x) (((x) >> 16) & 0x7f)
 666  666  #define IA32_THERM_INTERRUPT_TR2_IE     0x00800000
 667  667  #define IA32_THERM_INTERRUPT_PL_NE      0x01000000
 668  668  
 669  669  #define MSR_IA32_THERM_STATUS           0x19c
 670  670  #define IA32_THERM_STATUS_STATUS                0x00000001
 671  671  #define IA32_THERM_STATUS_STATUS_LOG            0x00000002
 672  672  #define IA32_THERM_STATUS_PROCHOT               0x00000004
 673  673  #define IA32_THERM_STATUS_PROCHOT_LOG           0x00000008
 674  674  #define IA32_THERM_STATUS_CRIT_STATUS           0x00000010
 675  675  #define IA32_THERM_STATUS_CRIT_LOG              0x00000020
 676  676  #define IA32_THERM_STATUS_TR1_STATUS            0x00000040
 677  677  #define IA32_THERM_STATUS_TR1_LOG               0x00000080
 678  678  #define IA32_THERM_STATUS_TR2_STATUS            0x00000100
 679  679  #define IA32_THERM_STATUS_TR2_LOG               0x00000200
 680  680  #define IA32_THERM_STATUS_POWER_LIMIT_STATUS    0x00000400
 681  681  #define IA32_THERM_STATUS_POWER_LIMIT_LOG       0x00000800
 682  682  #define IA32_THERM_STATUS_CURRENT_STATUS        0x00001000
 683  683  #define IA32_THERM_STATUS_CURRENT_LOG           0x00002000
 684  684  #define IA32_THERM_STATUS_CROSS_DOMAIN_STATUS   0x00004000
 685  685  #define IA32_THERM_STATUS_CROSS_DOMAIN_LOG      0x00008000
 686  686  #define IA32_THERM_STATUS_READING(x)            (((x) >> 16) & 0x7f)
 687  687  #define IA32_THERM_STATUS_RESOLUTION(x)         (((x) >> 27) & 0x0f)
 688  688  #define IA32_THERM_STATUS_READ_VALID            0x80000000
 689  689  
 690  690  #define MSR_TEMPERATURE_TARGET          0x1a2
 691  691  #define MSR_TEMPERATURE_TARGET_TARGET(x)        (((x) >> 16) & 0xff)
 692  692  /*
 693  693   * Not all models support the offset. Refer to the Intel SDM Volume 4 for a list
 694  694   * of which models have support for which bits.
 695  695   */
 696  696  #define MSR_TEMPERATURE_TARGET_OFFSET(x)        (((x) >> 24) & 0x0f)
 697  697  
 698  698  #define MSR_IA32_PACKAGE_THERM_STATUS           0x1b1
 699  699  #define IA32_PKG_THERM_STATUS_STATUS            0x00000001
 700  700  #define IA32_PKG_THERM_STATUS_STATUS_LOG        0x00000002
 701  701  #define IA32_PKG_THERM_STATUS_PROCHOT           0x00000004
 702  702  #define IA32_PKG_THERM_STATUS_PROCHOT_LOG       0x00000008
 703  703  #define IA32_PKG_THERM_STATUS_CRIT_STATUS       0x00000010
 704  704  #define IA32_PKG_THERM_STATUS_CRIT_LOG          0x00000020
 705  705  #define IA32_PKG_THERM_STATUS_TR1_STATUS        0x00000040
 706  706  #define IA32_PKG_THERM_STATUS_TR1_LOG           0x00000080
 707  707  #define IA32_PKG_THERM_STATUS_TR2_STATUS        0x00000100
 708  708  #define IA32_PKG_THERM_STATUS_TR2_LOG           0x00000200
 709  709  #define IA32_PKG_THERM_STATUS_READING(x)        (((x) >> 16) & 0x7f)
 710  710  
 711  711  #define MSR_IA32_PACKAGE_THERM_INTERRUPT        0x1b2
 712  712  #define IA32_PKG_THERM_INTERRUPT_HIGH_IE        0x00000001
 713  713  #define IA32_PKG_THERM_INTERRUPT_LOW_IE         0x00000002
 714  714  #define IA32_PKG_THERM_INTERRUPT_PROCHOT_IE     0x00000004
 715  715  #define IA32_PKG_THERM_INTERRUPT_OVERHEAT_IE    0x00000010
 716  716  #define IA32_PKG_THERM_INTERRUPT_TR1_VAL(x)     (((x) >> 8) & 0x7f)
 717  717  #define IA32_PKG_THERM_INTTERUPT_TR1_IE         0x00008000
 718  718  #define IA32_PKG_THERM_INTTERUPT_TR2_VAL(x)     (((x) >> 16) & 0x7f)
 719  719  #define IA32_PKG_THERM_INTERRUPT_TR2_IE         0x00800000
 720  720  #define IA32_PKG_THERM_INTERRUPT_PL_NE          0x01000000
 721  721  
 722  722  /*
 723  723   * AMD TOM and TOM2 MSRs. These control the split between DRAM and MMIO below
 724  724   * and above 4 GiB respectively. These have existed since family 0xf.
 725  725   */
 726  726  #define MSR_AMD_TOM                             0xc001001a
 727  727  #define MSR_AMD_TOM_MASK(x)                     ((x) & 0xffffff800000)
 728  728  #define MSR_AMD_TOM2                            0xc001001d
 729  729  #define MSR_AMD_TOM2_MASK(x)                    ((x) & 0xffffff800000)
 730  730  
 731  731  
 732  732  #define MCI_CTL_VALUE           0xffffffff
 733  733  
 734  734  #define MTRR_TYPE_UC            0
 735  735  #define MTRR_TYPE_WC            1
 736  736  #define MTRR_TYPE_WT            4
 737  737  #define MTRR_TYPE_WP            5
 738  738  #define MTRR_TYPE_WB            6
 739  739  #define MTRR_TYPE_UC_           7
 740  740  
 741  741  /*
 742  742   * For Solaris we set up the page attribute table in the following way:
 743  743   * PAT0 Write-Back
 744  744   * PAT1 Write-Through
 745  745   * PAT2 Uncacheable-
 746  746   * PAT3 Uncacheable
 747  747   * PAT4 Write-Back
 748  748   * PAT5 Write-Through
 749  749   * PAT6 Write-Combine
 750  750   * PAT7 Uncacheable
 751  751   * The only difference from h/w default is entry 6.
 752  752   */
 753  753  #define PAT_DEFAULT_ATTRIBUTE                   \
 754  754          ((uint64_t)MTRR_TYPE_WB |               \
 755  755          ((uint64_t)MTRR_TYPE_WT << 8) |         \
 756  756          ((uint64_t)MTRR_TYPE_UC_ << 16) |       \
 757  757          ((uint64_t)MTRR_TYPE_UC << 24) |        \
 758  758          ((uint64_t)MTRR_TYPE_WB << 32) |        \
 759  759          ((uint64_t)MTRR_TYPE_WT << 40) |        \
 760  760          ((uint64_t)MTRR_TYPE_WC << 48) |        \
 761  761          ((uint64_t)MTRR_TYPE_UC << 56))
 762  762  
 763  763  #define X86FSET_LARGEPAGE       0
 764  764  #define X86FSET_TSC             1
 765  765  #define X86FSET_MSR             2
 766  766  #define X86FSET_MTRR            3
 767  767  #define X86FSET_PGE             4
 768  768  #define X86FSET_DE              5
 769  769  #define X86FSET_CMOV            6
 770  770  #define X86FSET_MMX             7
 771  771  #define X86FSET_MCA             8
 772  772  #define X86FSET_PAE             9
 773  773  #define X86FSET_CX8             10
 774  774  #define X86FSET_PAT             11
 775  775  #define X86FSET_SEP             12
 776  776  #define X86FSET_SSE             13
 777  777  #define X86FSET_SSE2            14
 778  778  #define X86FSET_HTT             15
 779  779  #define X86FSET_ASYSC           16
 780  780  #define X86FSET_NX              17
 781  781  #define X86FSET_SSE3            18
 782  782  #define X86FSET_CX16            19
 783  783  #define X86FSET_CMP             20
 784  784  #define X86FSET_TSCP            21
 785  785  #define X86FSET_MWAIT           22
 786  786  #define X86FSET_SSE4A           23
 787  787  #define X86FSET_CPUID           24
 788  788  #define X86FSET_SSSE3           25
 789  789  #define X86FSET_SSE4_1          26
 790  790  #define X86FSET_SSE4_2          27
 791  791  #define X86FSET_1GPG            28
 792  792  #define X86FSET_CLFSH           29
 793  793  #define X86FSET_64              30
 794  794  #define X86FSET_AES             31
 795  795  #define X86FSET_PCLMULQDQ       32
 796  796  #define X86FSET_XSAVE           33
 797  797  #define X86FSET_AVX             34
 798  798  #define X86FSET_VMX             35
 799  799  #define X86FSET_SVM             36
 800  800  #define X86FSET_TOPOEXT         37
 801  801  #define X86FSET_F16C            38
 802  802  #define X86FSET_RDRAND          39
 803  803  #define X86FSET_X2APIC          40
 804  804  #define X86FSET_AVX2            41
 805  805  #define X86FSET_BMI1            42
 806  806  #define X86FSET_BMI2            43
 807  807  #define X86FSET_FMA             44
 808  808  #define X86FSET_SMEP            45
 809  809  #define X86FSET_SMAP            46
 810  810  #define X86FSET_ADX             47
 811  811  #define X86FSET_RDSEED          48
 812  812  #define X86FSET_MPX             49
 813  813  #define X86FSET_AVX512F         50
 814  814  #define X86FSET_AVX512DQ        51
 815  815  #define X86FSET_AVX512PF        52
 816  816  #define X86FSET_AVX512ER        53
 817  817  #define X86FSET_AVX512CD        54
 818  818  #define X86FSET_AVX512BW        55
 819  819  #define X86FSET_AVX512VL        56
 820  820  #define X86FSET_AVX512FMA       57
 821  821  #define X86FSET_AVX512VBMI      58
 822  822  #define X86FSET_AVX512VPOPCDQ   59
 823  823  #define X86FSET_AVX512NNIW      60
 824  824  #define X86FSET_AVX512FMAPS     61
 825  825  #define X86FSET_XSAVEOPT        62
 826  826  #define X86FSET_XSAVEC          63
 827  827  #define X86FSET_XSAVES          64
 828  828  #define X86FSET_SHA             65
 829  829  #define X86FSET_UMIP            66
 830  830  #define X86FSET_PKU             67
 831  831  #define X86FSET_OSPKE           68
 832  832  #define X86FSET_PCID            69
 833  833  #define X86FSET_INVPCID         70
 834  834  #define X86FSET_IBRS            71
 835  835  #define X86FSET_IBPB            72
 836  836  #define X86FSET_STIBP           73
 837  837  #define X86FSET_SSBD            74
 838  838  #define X86FSET_SSBD_VIRT       75
 839  839  #define X86FSET_RDCL_NO         76
 840  840  #define X86FSET_IBRS_ALL        77
 841  841  #define X86FSET_RSBA            78
 842  842  #define X86FSET_SSB_NO          79
 843  843  #define X86FSET_STIBP_ALL       80
 844  844  #define X86FSET_FLUSH_CMD       81
 845  845  #define X86FSET_L1D_VM_NO       82
 846  846  #define X86FSET_FSGSBASE        83
 847  847  #define X86FSET_CLFLUSHOPT      84
 848  848  #define X86FSET_CLWB            85
 849  849  #define X86FSET_MONITORX        86
 850  850  #define X86FSET_CLZERO          87
 851  851  #define X86FSET_XOP             88
 852  852  #define X86FSET_FMA4            89
 853  853  #define X86FSET_TBM             90
 854  854  #define X86FSET_AVX512VNNI      91
 855  855  #define X86FSET_AMD_PCEC        92
 856  856  #define X86FSET_MD_CLEAR        93
 857  857  #define X86FSET_MDS_NO          94
 858  858  #define X86FSET_CORE_THERMAL    95
 859  859  #define X86FSET_PKG_THERMAL     96
 860  860  #define X86FSET_TSX_CTRL        97
 861  861  #define X86FSET_TAA_NO          98
 862  862  #define X86FSET_PPIN            99
 863  863  #define X86FSET_VAES            100
 864  864  #define X86FSET_VPCLMULQDQ      101
 865  865  #define X86FSET_LFENCE_SER      102
 866  866  #define X86FSET_GFNI            103
 867  867  #define X86FSET_AVX512_VP2INT   104
 868  868  #define X86FSET_AVX512_BITALG   105
 869  869  #define X86FSET_AVX512_VBMI2    106
 870  870  #define X86FSET_AVX512_BF16     107
 871  871  #define X86FSET_AUTO_IBRS       108
 872  872  
 873  873  /*
 874  874   * Intel Deep C-State invariant TSC in leaf 0x80000007.
 875  875   */
 876  876  #define CPUID_TSC_CSTATE_INVARIANCE     (0x100)
 877  877  
 878  878  /*
 879  879   * Intel TSC deadline timer
 880  880   */
 881  881  #define CPUID_DEADLINE_TSC      (1 << 24)
 882  882  
 883  883  /*
 884  884   * x86_type is a legacy concept; this is supplanted
 885  885   * for most purposes by x86_featureset; modern CPUs
 886  886   * should be X86_TYPE_OTHER
 887  887   */
 888  888  #define X86_TYPE_OTHER          0
 889  889  #define X86_TYPE_486            1
 890  890  #define X86_TYPE_P5             2
 891  891  #define X86_TYPE_P6             3
 892  892  #define X86_TYPE_CYRIX_486      4
 893  893  #define X86_TYPE_CYRIX_6x86L    5
 894  894  #define X86_TYPE_CYRIX_6x86     6
 895  895  #define X86_TYPE_CYRIX_GXm      7
 896  896  #define X86_TYPE_CYRIX_6x86MX   8
 897  897  #define X86_TYPE_CYRIX_MediaGX  9
 898  898  #define X86_TYPE_CYRIX_MII      10
 899  899  #define X86_TYPE_VIA_CYRIX_III  11
 900  900  #define X86_TYPE_P4             12
 901  901  
 902  902  /*
 903  903   * x86_vendor allows us to select between
 904  904   * implementation features and helps guide
 905  905   * the interpretation of the cpuid instruction.
 906  906   */
 907  907  #define X86_VENDOR_Intel        0
 908  908  #define X86_VENDORSTR_Intel     "GenuineIntel"
 909  909  
 910  910  #define X86_VENDOR_IntelClone   1
 911  911  
 912  912  #define X86_VENDOR_AMD          2
 913  913  #define X86_VENDORSTR_AMD       "AuthenticAMD"
 914  914  
 915  915  #define X86_VENDOR_Cyrix        3
 916  916  #define X86_VENDORSTR_CYRIX     "CyrixInstead"
 917  917  
 918  918  #define X86_VENDOR_UMC          4
 919  919  #define X86_VENDORSTR_UMC       "UMC UMC UMC "
 920  920  
 921  921  #define X86_VENDOR_NexGen       5
 922  922  #define X86_VENDORSTR_NexGen    "NexGenDriven"
 923  923  
 924  924  #define X86_VENDOR_Centaur      6
 925  925  #define X86_VENDORSTR_Centaur   "CentaurHauls"
 926  926  
 927  927  #define X86_VENDOR_Rise         7
 928  928  #define X86_VENDORSTR_Rise      "RiseRiseRise"
 929  929  
 930  930  #define X86_VENDOR_SiS          8
 931  931  #define X86_VENDORSTR_SiS       "SiS SiS SiS "
 932  932  
 933  933  #define X86_VENDOR_TM           9
 934  934  #define X86_VENDORSTR_TM        "GenuineTMx86"
 935  935  
 936  936  #define X86_VENDOR_NSC          10
 937  937  #define X86_VENDORSTR_NSC       "Geode by NSC"
 938  938  
 939  939  #define X86_VENDOR_HYGON        11
 940  940  #define X86_VENDORSTR_HYGON     "HygonGenuine"
 941  941  
 942  942  /*
 943  943   * Vendor string max len + \0
 944  944   */
 945  945  #define X86_VENDOR_STRLEN       13
 946  946  
 947  947  /*
 948  948   * For lookups and matching functions only; not an actual vendor.
 949  949   */
 950  950  #define _X86_VENDOR_MATCH_ALL   0xff
 951  951  
 952  952  /*
 953  953   * See the big theory statement at the top of cpuid.c for information about how
 954  954   * processor families and microarchitecture families relate to cpuid families,
 955  955   * models, and steppings.
 956  956   */
 957  957  
 958  958  #define _X86_CHIPREV_VENDOR_SHIFT       24
 959  959  #define _X86_CHIPREV_FAMILY_SHIFT       16
 960  960  
 961  961  #define _X86_CHIPREV_VENDOR(x)          \
 962  962          bitx32((uint32_t)(x), 31, _X86_CHIPREV_VENDOR_SHIFT)
 963  963  
 964  964  #define _X86_CHIPREV_FAMILY(x)          \
 965  965          bitx32((uint32_t)(x), 23, _X86_CHIPREV_FAMILY_SHIFT)
 966  966  
 967  967  #define _X86_CHIPREV_REV(x) \
 968  968          bitx32((uint32_t)(x), 15, 0)
 969  969  
 970  970  #define _X86_CHIPREV_MKREV(vendor, family, rev) \
 971  971          ((uint32_t)(vendor) << _X86_CHIPREV_VENDOR_SHIFT | \
 972  972          (uint32_t)(family) << _X86_CHIPREV_FAMILY_SHIFT | (uint32_t)(rev))
 973  973  
 974  974  /*
 975  975   * The legacy families here are a little bit unfortunate.  Part of this is that
 976  976   * the way AMD used the cpuid family/model/stepping changed somewhat over time,
 977  977   * but the more immediate reason it's this way is more that the way we use
 978  978   * chiprev/processor family changed with it.  The ancient amd_opteron and mc-amd
 979  979   * drivers used the chiprevs that were based on cpuid family, mainly 0xf and
 980  980   * 0x10.  amdzen_umc wants the processor family, in part because AMD's
 981  981   * overloading of the cpuid family has made it effectively useless for
 982  982   * discerning anything about the processor.  That also tied into the way
 983  983   * amd_revmap was previously organised in cpuid_subr.c: up to family 0x14
 984  984   * everything was just "rev A", "rev B", etc.; afterward we started using the
 985  985   * new shorthand, again tied to how AMD was presenting this information.
 986  986   * Because there are other consumers of the processor family, it no longer made
 987  987   * sense for amdzen to derive the processor family from the cpuid family/model
 988  988   * given that we have this collection of definitions already and code in
 989  989   * cpuid_subr.c to make use of them.  The result is this unified approach that
 990  990   * tries to keep old consumers happy while allowing new ones to get the degree
 991  991   * of detail they need and expect.  That required bending things a bit to make
 992  992   * them fit, though critically as long as AMD keep on their current path and all
 993  993   * new consumers look like the ones we are adding these days, we will be able to
 994  994   * continue making new additions that will match all the recent ones and the way
 995  995   * AMD are currently using families and models.  There is absolutely no reason
 996  996   * we couldn't go back and dig through all the legacy parts and break them down
 997  997   * the same way, then change the old MC and CPU drivers to match, but I didn't
 998  998   * feel like doing a lot of work for processors that it's unlikely anyone is
 999  999   * still using and even more unlikely anyone will introduce new code to support.
1000 1000   * My compromise was to flesh things out starting where we already had more
1001 1001   * detail even if nothing was consuming it programmatically: at 0x15.  Before
1002 1002   * that, processor family and cpuid family were effectively the same, because
1003 1003   * that's what those old consumers expect.
1004 1004   */
1005 1005  
1006 1006  #ifndef _ASM
1007 1007  typedef enum x86_processor_family {
1008 1008          X86_PF_UNKNOWN,
1009 1009          X86_PF_AMD_LEGACY_F = 0xf,
1010 1010          X86_PF_AMD_LEGACY_10 = 0x10,
1011 1011          X86_PF_AMD_LEGACY_11 = 0x11,
1012 1012          X86_PF_AMD_LEGACY_12 = 0x12,
1013 1013          X86_PF_AMD_LEGACY_14 = 0x14,
1014 1014          X86_PF_AMD_OROCHI,
1015 1015          X86_PF_AMD_TRINITY,
1016 1016          X86_PF_AMD_KAVERI,
1017 1017          X86_PF_AMD_CARRIZO,
1018 1018          X86_PF_AMD_STONEY_RIDGE,
1019 1019          X86_PF_AMD_KABINI,
1020 1020          X86_PF_AMD_MULLINS,
1021 1021          X86_PF_AMD_NAPLES,
1022 1022          X86_PF_AMD_PINNACLE_RIDGE,
1023 1023          X86_PF_AMD_RAVEN_RIDGE,
1024 1024          X86_PF_AMD_PICASSO,
1025 1025          X86_PF_AMD_DALI,
1026 1026          X86_PF_AMD_ROME,
1027 1027          X86_PF_AMD_RENOIR,
1028 1028          X86_PF_AMD_MATISSE,
1029 1029          X86_PF_AMD_VAN_GOGH,
1030 1030          X86_PF_AMD_MENDOCINO,
1031 1031          X86_PF_HYGON_DHYANA,
1032 1032          X86_PF_AMD_MILAN,
1033 1033          X86_PF_AMD_GENOA,
1034 1034          X86_PF_AMD_VERMEER,
1035 1035          X86_PF_AMD_REMBRANDT,
1036 1036          X86_PF_AMD_CEZANNE,
1037 1037          X86_PF_AMD_RAPHAEL,
1038 1038          X86_PF_AMD_PHOENIX,
1039 1039          X86_PF_AMD_BERGAMO,
1040 1040  
1041 1041          X86_PF_ANY = 0xff
1042 1042  } x86_processor_family_t;
1043 1043  
1044 1044  #define _DECL_CHIPREV(_v, _f, _revn, _revb)     \
1045 1045          X86_CHIPREV_ ## _v ## _ ## _f ## _ ## _revn =   \
1046 1046          _X86_CHIPREV_MKREV(X86_VENDOR_ ## _v, X86_PF_ ## _v ## _ ## _f, _revb)
1047 1047  
1048 1048  #define _X86_CHIPREV_REV_MATCH_ALL      0xffff
1049 1049  
1050 1050  typedef enum x86_chiprev {
1051 1051          X86_CHIPREV_UNKNOWN,
1052 1052          _DECL_CHIPREV(AMD, LEGACY_F, REV_B, 0x0001),
1053 1053          /*
1054 1054           * Definitions for AMD Family 0xf. Minor revisions C0 and CG are
1055 1055           * sufficiently different that we will distinguish them; in all other
1056 1056           * case we will identify the major revision.
1057 1057           */
1058 1058          _DECL_CHIPREV(AMD, LEGACY_F, REV_C0, 0x0002),
1059 1059          _DECL_CHIPREV(AMD, LEGACY_F, REV_CG, 0x0004),
1060 1060          _DECL_CHIPREV(AMD, LEGACY_F, REV_D, 0x0008),
1061 1061          _DECL_CHIPREV(AMD, LEGACY_F, REV_E, 0x0010),
1062 1062          _DECL_CHIPREV(AMD, LEGACY_F, REV_F, 0x0020),
1063 1063          _DECL_CHIPREV(AMD, LEGACY_F, REV_G, 0x0040),
1064 1064          _DECL_CHIPREV(AMD, LEGACY_F, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1065 1065  
1066 1066          _DECL_CHIPREV(AMD, LEGACY_10, UNKNOWN, 0x0001),
1067 1067          _DECL_CHIPREV(AMD, LEGACY_10, REV_A, 0x0002),
1068 1068          _DECL_CHIPREV(AMD, LEGACY_10, REV_B, 0x0004),
1069 1069          _DECL_CHIPREV(AMD, LEGACY_10, REV_C2, 0x0008),
1070 1070          _DECL_CHIPREV(AMD, LEGACY_10, REV_C3, 0x0010),
1071 1071          _DECL_CHIPREV(AMD, LEGACY_10, REV_D0, 0x0020),
1072 1072          _DECL_CHIPREV(AMD, LEGACY_10, REV_D1, 0x0040),
1073 1073          _DECL_CHIPREV(AMD, LEGACY_10, REV_E, 0x0080),
1074 1074          _DECL_CHIPREV(AMD, LEGACY_10, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1075 1075  
1076 1076          _DECL_CHIPREV(AMD, LEGACY_11, UNKNOWN, 0x0001),
1077 1077          _DECL_CHIPREV(AMD, LEGACY_11, REV_B, 0x0002),
1078 1078          _DECL_CHIPREV(AMD, LEGACY_11, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1079 1079  
1080 1080          _DECL_CHIPREV(AMD, LEGACY_12, UNKNOWN, 0x0001),
1081 1081          _DECL_CHIPREV(AMD, LEGACY_12, REV_B, 0x0002),
1082 1082          _DECL_CHIPREV(AMD, LEGACY_12, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1083 1083  
1084 1084          _DECL_CHIPREV(AMD, LEGACY_14, UNKNOWN, 0x0001),
1085 1085          _DECL_CHIPREV(AMD, LEGACY_14, REV_B, 0x0002),
1086 1086          _DECL_CHIPREV(AMD, LEGACY_14, REV_C, 0x0004),
1087 1087          _DECL_CHIPREV(AMD, LEGACY_14, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1088 1088  
1089 1089          _DECL_CHIPREV(AMD, OROCHI, UNKNOWN, 0x0001),
1090 1090          _DECL_CHIPREV(AMD, OROCHI, REV_B2, 0x0002),
1091 1091          _DECL_CHIPREV(AMD, OROCHI, REV_C0, 0x0004),
1092 1092          _DECL_CHIPREV(AMD, OROCHI, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1093 1093  
1094 1094          _DECL_CHIPREV(AMD, TRINITY, UNKNOWN, 0x0001),
1095 1095          _DECL_CHIPREV(AMD, TRINITY, REV_A1, 0x0002),
1096 1096          _DECL_CHIPREV(AMD, TRINITY, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1097 1097  
1098 1098          _DECL_CHIPREV(AMD, KAVERI, UNKNOWN, 0x0001),
1099 1099          _DECL_CHIPREV(AMD, KAVERI, REV_A1, 0x0002),
1100 1100          _DECL_CHIPREV(AMD, KAVERI, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1101 1101  
1102 1102          _DECL_CHIPREV(AMD, CARRIZO, UNKNOWN, 0x0001),
1103 1103          _DECL_CHIPREV(AMD, CARRIZO, REV_A0, 0x0002),
1104 1104          _DECL_CHIPREV(AMD, CARRIZO, REV_A1, 0x0004),
1105 1105          _DECL_CHIPREV(AMD, CARRIZO, REV_DDR4, 0x0008),
1106 1106          _DECL_CHIPREV(AMD, CARRIZO, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1107 1107  
1108 1108          _DECL_CHIPREV(AMD, STONEY_RIDGE, UNKNOWN, 0x0001),
1109 1109          _DECL_CHIPREV(AMD, STONEY_RIDGE, REV_A0, 0x0002),
1110 1110          _DECL_CHIPREV(AMD, STONEY_RIDGE, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1111 1111  
1112 1112          _DECL_CHIPREV(AMD, KABINI, UNKNOWN, 0x0001),
1113 1113          _DECL_CHIPREV(AMD, KABINI, A1, 0x0002),
1114 1114          _DECL_CHIPREV(AMD, KABINI, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1115 1115  
1116 1116          _DECL_CHIPREV(AMD, MULLINS, UNKNOWN, 0x0001),
1117 1117          _DECL_CHIPREV(AMD, MULLINS, A1, 0x0002),
1118 1118          _DECL_CHIPREV(AMD, MULLINS, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1119 1119  
1120 1120          _DECL_CHIPREV(AMD, NAPLES, UNKNOWN, 0x0001),
1121 1121          _DECL_CHIPREV(AMD, NAPLES, A0, 0x0002),
1122 1122          _DECL_CHIPREV(AMD, NAPLES, B1, 0x0004),
1123 1123          _DECL_CHIPREV(AMD, NAPLES, B2, 0x0008),
1124 1124          _DECL_CHIPREV(AMD, NAPLES, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1125 1125  
1126 1126          _DECL_CHIPREV(AMD, PINNACLE_RIDGE, UNKNOWN, 0x0001),
1127 1127          _DECL_CHIPREV(AMD, PINNACLE_RIDGE, B2, 0x0002),
1128 1128          _DECL_CHIPREV(AMD, PINNACLE_RIDGE, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1129 1129  
1130 1130          _DECL_CHIPREV(AMD, RAVEN_RIDGE, UNKNOWN, 0x0001),
1131 1131          _DECL_CHIPREV(AMD, RAVEN_RIDGE, B0, 0x0002),
1132 1132          _DECL_CHIPREV(AMD, RAVEN_RIDGE, B1, 0x0004),
1133 1133          _DECL_CHIPREV(AMD, RAVEN_RIDGE, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1134 1134  
1135 1135          _DECL_CHIPREV(AMD, PICASSO, UNKNOWN, 0x0001),
1136 1136          _DECL_CHIPREV(AMD, PICASSO, B1, 0x0002),
1137 1137          _DECL_CHIPREV(AMD, PICASSO, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1138 1138  
1139 1139          _DECL_CHIPREV(AMD, DALI, UNKNOWN, 0x0001),
1140 1140          _DECL_CHIPREV(AMD, DALI, A1, 0x0002),
1141 1141          _DECL_CHIPREV(AMD, DALI, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1142 1142  
1143 1143          _DECL_CHIPREV(AMD, ROME, UNKNOWN, 0x0001),
1144 1144          _DECL_CHIPREV(AMD, ROME, A0, 0x0002),
1145 1145          _DECL_CHIPREV(AMD, ROME, B0, 0x0004),
1146 1146          _DECL_CHIPREV(AMD, ROME, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1147 1147  
1148 1148          _DECL_CHIPREV(AMD, RENOIR, UNKNOWN, 0x0001),
1149 1149          _DECL_CHIPREV(AMD, RENOIR, A1, 0x0002),
1150 1150          _DECL_CHIPREV(AMD, RENOIR, LCN_A1, 0x0004),
1151 1151          _DECL_CHIPREV(AMD, RENOIR, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1152 1152  
1153 1153          _DECL_CHIPREV(AMD, MATISSE, UNKNOWN, 0x0001),
1154 1154          _DECL_CHIPREV(AMD, MATISSE, B0, 0x0002),
1155 1155          _DECL_CHIPREV(AMD, MATISSE, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1156 1156  
1157 1157          _DECL_CHIPREV(AMD, VAN_GOGH, UNKNOWN, 0x0001),
1158 1158          _DECL_CHIPREV(AMD, VAN_GOGH, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1159 1159  
1160 1160          _DECL_CHIPREV(AMD, MENDOCINO, UNKNOWN, 0x0001),
1161 1161          _DECL_CHIPREV(AMD, MENDOCINO, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1162 1162  
1163 1163          _DECL_CHIPREV(HYGON, DHYANA, UNKNOWN, 0x0001),
1164 1164          _DECL_CHIPREV(HYGON, DHYANA, A1, 0x0002),
1165 1165          _DECL_CHIPREV(HYGON, DHYANA, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1166 1166  
1167 1167          _DECL_CHIPREV(AMD, MILAN, UNKNOWN, 0x0001),
1168 1168          _DECL_CHIPREV(AMD, MILAN, A0, 0x0002),
1169 1169          _DECL_CHIPREV(AMD, MILAN, B0, 0x0004),
1170 1170          _DECL_CHIPREV(AMD, MILAN, B1, 0x0008),
1171 1171          _DECL_CHIPREV(AMD, MILAN, B2, 0x0010),
1172 1172          _DECL_CHIPREV(AMD, MILAN, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1173 1173  
1174 1174          _DECL_CHIPREV(AMD, GENOA, UNKNOWN, 0x0001),
1175 1175          _DECL_CHIPREV(AMD, GENOA, A0, 0x0002),
1176 1176          _DECL_CHIPREV(AMD, GENOA, A1, 0x0004),
1177 1177          _DECL_CHIPREV(AMD, GENOA, B0, 0x0008),
1178 1178          _DECL_CHIPREV(AMD, GENOA, B1, 0x0010),
1179 1179          _DECL_CHIPREV(AMD, GENOA, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1180 1180  
1181 1181          _DECL_CHIPREV(AMD, VERMEER, UNKNOWN, 0x0001),
1182 1182          _DECL_CHIPREV(AMD, VERMEER, A0, 0x0002),
1183 1183          _DECL_CHIPREV(AMD, VERMEER, B0, 0x0004),
1184 1184          _DECL_CHIPREV(AMD, VERMEER, B2, 0x0008),        /* No B1 */
1185 1185          _DECL_CHIPREV(AMD, VERMEER, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1186 1186  
1187 1187          _DECL_CHIPREV(AMD, REMBRANDT, UNKNOWN, 0x0001),
1188 1188          _DECL_CHIPREV(AMD, REMBRANDT, A0, 0x0002),
1189 1189          _DECL_CHIPREV(AMD, REMBRANDT, B0, 0x0004),
1190 1190          _DECL_CHIPREV(AMD, REMBRANDT, B1, 0x0008),
1191 1191          _DECL_CHIPREV(AMD, REMBRANDT, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1192 1192  
1193 1193          _DECL_CHIPREV(AMD, CEZANNE, UNKNOWN, 0x0001),
1194 1194          _DECL_CHIPREV(AMD, CEZANNE, A0, 0x0002),
1195 1195          _DECL_CHIPREV(AMD, CEZANNE, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1196 1196  
1197 1197          _DECL_CHIPREV(AMD, RAPHAEL, UNKNOWN, 0x0001),
1198 1198          _DECL_CHIPREV(AMD, RAPHAEL, B2, 0x0002),
1199 1199          _DECL_CHIPREV(AMD, RAPHAEL, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1200 1200  
1201 1201          _DECL_CHIPREV(AMD, PHOENIX, UNKNOWN, 0x0001),
1202 1202          _DECL_CHIPREV(AMD, PHOENIX, A0, 0x0002),
1203 1203          _DECL_CHIPREV(AMD, PHOENIX, A1, 0x0004),
1204 1204          _DECL_CHIPREV(AMD, PHOENIX, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1205 1205  
1206 1206          _DECL_CHIPREV(AMD, BERGAMO, UNKNOWN, 0x0001),
1207 1207          _DECL_CHIPREV(AMD, BERGAMO, ANY, _X86_CHIPREV_REV_MATCH_ALL),
1208 1208  
1209 1209          /* Keep at the end */
1210 1210          X86_CHIPREV_ANY = _X86_CHIPREV_MKREV(_X86_VENDOR_MATCH_ALL, X86_PF_ANY,
1211 1211              _X86_CHIPREV_REV_MATCH_ALL)
1212 1212  } x86_chiprev_t;
1213 1213  
1214 1214  #undef  _DECL_CHIPREV
1215 1215  
1216 1216  /*
1217 1217   * Same thing, but for microarchitecture (core implementations).  We are not
1218 1218   * attempting to capture every possible fine-grained detail here; to the extent
1219 1219   * that it matters, we do so in cpuid.c via ISA/feature bits.  We use the same
1220 1220   * number of bits for each field as in chiprev.
1221 1221   */
1222 1222  
1223 1223  #define _X86_UARCHREV_VENDOR(x) _X86_CHIPREV_VENDOR(x)
1224 1224  #define _X86_UARCHREV_UARCH(x)  _X86_CHIPREV_FAMILY(x)
1225 1225  #define _X86_UARCHREV_REV(x)    _X86_CHIPREV_REV(x)
1226 1226  
1227 1227  #define _X86_UARCHREV_MKREV(vendor, family, rev) \
1228 1228          _X86_CHIPREV_MKREV(vendor, family, rev)
1229 1229  
typedef enum x86_uarch {
	X86_UARCH_UNKNOWN,	/* microarchitecture not (yet) identified */

	X86_UARCH_AMD_LEGACY,	/* pre-Zen AMD families */
	X86_UARCH_AMD_ZEN1,
	X86_UARCH_AMD_ZENPLUS,
	X86_UARCH_AMD_ZEN2,
	X86_UARCH_AMD_ZEN3,
	X86_UARCH_AMD_ZEN4,

	/* Wildcard; used by _X86_UARCHREV_ANY to match every uarch. */
	X86_UARCH_ANY = 0xff
} x86_uarch_t;
1242 1242  
/*
 * Paste together the enumerator X86_UARCHREV_<vendor>_<uarch>_<rev>, whose
 * value encodes the vendor, microarchitecture, and the single revision bit
 * _revb via _X86_UARCHREV_MKREV.
 */
#define	_DECL_UARCHREV(_v, _f, _revn, _revb)	\
	X86_UARCHREV_ ## _v ## _ ## _f ## _ ## _revn =	\
	_X86_UARCHREV_MKREV(X86_VENDOR_ ## _v, X86_UARCH_ ## _v ## _ ## _f, \
	_revb)

/*
 * As above, but for microarchitectures that have no named revisions; the
 * enumerator is just X86_UARCHREV_<vendor>_<uarch>.
 */
#define	_DECL_UARCHREV_NOREV(_v, _f, _revb)	\
	X86_UARCHREV_ ## _v ## _ ## _f =	\
	_X86_UARCHREV_MKREV(X86_VENDOR_ ## _v, X86_UARCH_ ## _v ## _ ## _f, \
	_revb)

/* Revision field value that matches every revision of a microarchitecture. */
#define	_X86_UARCHREV_REV_MATCH_ALL	0xffff
1254 1254  
/*
 * Vendor + microarchitecture + revision triples.  Each uarch gets a base
 * enumerator (revision bit 0x0001, either UNKNOWN or unnamed) and an ANY
 * wildcard matching all of its revisions.
 */
typedef enum x86_uarchrev {
	X86_UARCHREV_UNKNOWN,
	_DECL_UARCHREV_NOREV(AMD, LEGACY, 0x0001),
	_DECL_UARCHREV(AMD, LEGACY, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	_DECL_UARCHREV_NOREV(AMD, ZEN1, 0x0001),
	_DECL_UARCHREV(AMD, ZEN1, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	_DECL_UARCHREV_NOREV(AMD, ZENPLUS, 0x0001),
	_DECL_UARCHREV(AMD, ZENPLUS, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	_DECL_UARCHREV(AMD, ZEN2, UNKNOWN, 0x0001),
	_DECL_UARCHREV(AMD, ZEN2, A0, 0x0002),
	_DECL_UARCHREV(AMD, ZEN2, B0, 0x0004),
	_DECL_UARCHREV(AMD, ZEN2, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	_DECL_UARCHREV(AMD, ZEN3, UNKNOWN, 0x0001),
	_DECL_UARCHREV(AMD, ZEN3, A0, 0x0002),
	_DECL_UARCHREV(AMD, ZEN3, B0, 0x0004),
	_DECL_UARCHREV(AMD, ZEN3, B1, 0x0008),
	_DECL_UARCHREV(AMD, ZEN3, B2, 0x0010),
	_DECL_UARCHREV(AMD, ZEN3, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	_DECL_UARCHREV_NOREV(AMD, ZEN4, 0x0001),
	_DECL_UARCHREV(AMD, ZEN4, ANY, _X86_UARCHREV_REV_MATCH_ALL),

	/* Keep at the end */
	_X86_UARCHREV_ANY = _X86_UARCHREV_MKREV(_X86_VENDOR_MATCH_ALL,
	    X86_UARCH_ANY, _X86_UARCHREV_REV_MATCH_ALL)
} x86_uarchrev_t;
1285 1285  
1286 1286  #undef  _DECL_UARCHREV
1287 1287  
1288 1288  #endif  /* !_ASM */
1289 1289  
1290 1290  /*
1291 1291   * Various socket/package types, extended as the need to distinguish
1292 1292   * a new type arises.  The top 8 bits identify the vendor and the
1293 1293   * remaining 24 bits describe 24 socket types.
1294 1294   */
1295 1295  
/* Vendor lives in the top 8 bits of a socket value; socket bit in the low 24. */
#define	_X86_SOCKET_VENDOR_SHIFT	24
#define	_X86_SOCKET_VENDOR(x)	((x) >> _X86_SOCKET_VENDOR_SHIFT)
#define	_X86_SOCKET_TYPE_MASK	0x00ffffff
#define	_X86_SOCKET_TYPE(x)		((x) & _X86_SOCKET_TYPE_MASK)

/* Compose a socket constant from a vendor id and a single socket bit. */
#define	_X86_SOCKET_MKVAL(vendor, bitval) \
	((uint32_t)(vendor) << _X86_SOCKET_VENDOR_SHIFT | (bitval))

/*
 * True when s and mask share the same vendor and at least one socket-type
 * bit; mask may have several type bits set to match a set of sockets.
 */
#define	X86_SOCKET_MATCH(s, mask) \
	(_X86_SOCKET_VENDOR(s) == _X86_SOCKET_VENDOR(mask) && \
	(_X86_SOCKET_TYPE(s) & _X86_SOCKET_TYPE(mask)) != 0)
1307 1307  
1308 1308  #define X86_SOCKET_UNKNOWN 0x0
1309 1309          /*
1310 1310           * AMD socket types
1311 1311           */
1312 1312  #define X86_SOCKET_754          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x01)
1313 1313  #define X86_SOCKET_939          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x02)
1314 1314  #define X86_SOCKET_940          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x03)
1315 1315  #define X86_SOCKET_S1g1         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x04)
1316 1316  #define X86_SOCKET_AM2          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x05)
1317 1317  #define X86_SOCKET_F1207        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x06)
1318 1318  #define X86_SOCKET_S1g2         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x07)
1319 1319  #define X86_SOCKET_S1g3         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x08)
1320 1320  #define X86_SOCKET_AM           _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x09)
1321 1321  #define X86_SOCKET_AM2R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0a)
1322 1322  #define X86_SOCKET_AM3          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0b)
1323 1323  #define X86_SOCKET_G34          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0c)
1324 1324  #define X86_SOCKET_ASB2         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0d)
1325 1325  #define X86_SOCKET_C32          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0e)
1326 1326  #define X86_SOCKET_S1g4         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x0f)
1327 1327  #define X86_SOCKET_FT1          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x10)
1328 1328  #define X86_SOCKET_FM1          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x11)
1329 1329  #define X86_SOCKET_FS1          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x12)
1330 1330  #define X86_SOCKET_AM3R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x13)
1331 1331  #define X86_SOCKET_FP2          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x14)
1332 1332  #define X86_SOCKET_FS1R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x15)
1333 1333  #define X86_SOCKET_FM2          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x16)
1334 1334  #define X86_SOCKET_FP3          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x17)
1335 1335  #define X86_SOCKET_FM2R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x18)
1336 1336  #define X86_SOCKET_FP4          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x19)
1337 1337  #define X86_SOCKET_AM4          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1a)
1338 1338  #define X86_SOCKET_FT3          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1b)
1339 1339  #define X86_SOCKET_FT4          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1c)
1340 1340  #define X86_SOCKET_FS1B         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1d)
1341 1341  #define X86_SOCKET_FT3B         _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1e)
1342 1342  #define X86_SOCKET_SP3          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x1f)
1343 1343  #define X86_SOCKET_SP3R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x20)
1344 1344  #define X86_SOCKET_FP5          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x21)
1345 1345  #define X86_SOCKET_FP6          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x22)
1346 1346  #define X86_SOCKET_STRX4        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x23)
1347 1347  #define X86_SOCKET_SP5          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x24)
1348 1348  #define X86_SOCKET_AM5          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x25)
1349 1349  #define X86_SOCKET_FP7          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x26)
1350 1350  #define X86_SOCKET_FP7R2        _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x27)
1351 1351  #define X86_SOCKET_FF3          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x28)
1352 1352  #define X86_SOCKET_FT6          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x29)
1353 1353  #define X86_SOCKET_FP8          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x2a)
1354 1354  #define X86_SOCKET_FL1          _X86_SOCKET_MKVAL(X86_VENDOR_AMD, 0x2b)
1355 1355  #define X86_NUM_SOCKETS_AMD     0x2b
1356 1356  
1357 1357  /*
1358 1358   * Hygon socket types
1359 1359   */
1360 1360  #define X86_SOCKET_SL1          _X86_SOCKET_MKVAL(X86_VENDOR_HYGON, 0x01)
1361 1361  #define X86_SOCKET_SL1R2        _X86_SOCKET_MKVAL(X86_VENDOR_HYGON, 0x02)
1362 1362  #define X86_SOCKET_DM1          _X86_SOCKET_MKVAL(X86_VENDOR_HYGON, 0x03)
1363 1363  #define X86_NUM_SOCKETS_HYGON   0x03
1364 1364  
1365 1365  #define X86_NUM_SOCKETS         (X86_NUM_SOCKETS_AMD + X86_NUM_SOCKETS_HYGON)
1366 1366  
1367 1367  /*
1368 1368   * Definitions for Intel processor models. These are all for Family 6
1369 1369   * processors. This list and the Atom set below it are not exhaustive.
1370 1370   */
1371 1371  #define INTC_MODEL_YONAH                0x0e
1372 1372  #define INTC_MODEL_MEROM                0x0f
1373 1373  #define INTC_MODEL_MEROM_L              0x16
1374 1374  #define INTC_MODEL_PENRYN               0x17
1375 1375  #define INTC_MODEL_DUNNINGTON           0x1d
1376 1376  
1377 1377  #define INTC_MODEL_NEHALEM              0x1e
1378 1378  #define INTC_MODEL_NEHALEM2             0x1f
1379 1379  #define INTC_MODEL_NEHALEM_EP           0x1a
1380 1380  #define INTC_MODEL_NEHALEM_EX           0x2e
1381 1381  
1382 1382  #define INTC_MODEL_WESTMERE             0x25
1383 1383  #define INTC_MODEL_WESTMERE_EP          0x2c
1384 1384  #define INTC_MODEL_WESTMERE_EX          0x2f
1385 1385  
1386 1386  #define INTC_MODEL_SANDYBRIDGE          0x2a
1387 1387  #define INTC_MODEL_SANDYBRIDGE_XEON     0x2d
1388 1388  #define INTC_MODEL_IVYBRIDGE            0x3a
1389 1389  #define INTC_MODEL_IVYBRIDGE_XEON       0x3e
1390 1390  
1391 1391  #define INTC_MODEL_HASWELL              0x3c
1392 1392  #define INTC_MODEL_HASWELL_ULT          0x45
1393 1393  #define INTC_MODEL_HASWELL_GT3E         0x46
1394 1394  #define INTC_MODEL_HASWELL_XEON         0x3f
1395 1395  
1396 1396  #define INTC_MODEL_BROADWELL            0x3d
1397 1397  #define INTC_MODEL_BROADELL_2           0x47
1398 1398  #define INTC_MODEL_BROADWELL_XEON       0x4f
1399 1399  #define INTC_MODEL_BROADWELL_XEON_D     0x56
1400 1400  
1401 1401  #define INTC_MODEL_SKYLAKE_MOBILE       0x4e
1402 1402  /*
1403 1403   * Note, this model is shared with Cascade Lake and Cooper Lake.
1404 1404   */
1405 1405  #define INTC_MODEL_SKYLAKE_XEON         0x55
1406 1406  #define INTC_MODEL_SKYLAKE_DESKTOP      0x5e
1407 1407  
1408 1408  /*
1409 1409   * Note, both Kaby Lake models are shared with Coffee Lake, Whiskey Lake, Amber
1410 1410   * Lake, and some Comet Lake parts.
1411 1411   */
1412 1412  #define INTC_MODEL_KABYLAKE_MOBILE      0x8e
1413 1413  #define INTC_MODEL_KABYLAKE_DESKTOP     0x9e
1414 1414  
1415 1415  #define INTC_MODEL_ICELAKE_XEON         0x6a
1416 1416  #define INTC_MODEL_ICELAKE_MOBILE       0x7e
1417 1417  #define INTC_MODEL_TIGERLAKE_MOBILE     0x8c
1418 1418  
1419 1419  #define INTC_MODEL_COMETLAKE            0xa5
1420 1420  #define INTC_MODEL_COMETLAKE_MOBILE     0xa6
1421 1421  #define INTC_MODEL_ROCKETLAKE           0xa7
1422 1422  
1423 1423  /*
1424 1424   * Atom Processors
1425 1425   */
1426 1426  #define INTC_MODEL_SILVERTHORNE         0x1c
1427 1427  #define INTC_MODEL_LINCROFT             0x26
1428 1428  #define INTC_MODEL_PENWELL              0x27
1429 1429  #define INTC_MODEL_CLOVERVIEW           0x35
  
    | 
      ↓ open down ↓ | 
    1429 lines elided | 
    
      ↑ open up ↑ | 
  
1430 1430  #define INTC_MODEL_CEDARVIEW            0x36
1431 1431  #define INTC_MODEL_BAY_TRAIL            0x37
1432 1432  #define INTC_MODEL_AVATON               0x4d
1433 1433  #define INTC_MODEL_AIRMONT              0x4c
1434 1434  #define INTC_MODEL_GOLDMONT             0x5c
1435 1435  #define INTC_MODEL_DENVERTON            0x5f
1436 1436  #define INTC_MODEL_GEMINI_LAKE          0x7a
1437 1437  
1438 1438  /*
1439 1439   * xgetbv/xsetbv support
1440      - * See section 13.3 in vol. 1 of the Intel devlopers manual.
     1440 + * See section 13.3 in vol. 1 of the Intel Developer's manual.
1441 1441   */
1442 1442  
1443 1443  #define XFEATURE_ENABLED_MASK   0x0
1444 1444  /*
1445 1445   * XFEATURE_ENABLED_MASK values (eax)
1446 1446   * See setup_xfem().
1447 1447   */
1448      -#define XFEATURE_LEGACY_FP      0x1
1449      -#define XFEATURE_SSE            0x2
1450      -#define XFEATURE_AVX            0x4
1451      -#define XFEATURE_MPX            0x18    /* 2 bits, both 0 or 1 */
1452      -#define XFEATURE_AVX512         0xe0    /* 3 bits, all 0 or 1 */
     1448 +#define XFEATURE_LEGACY_FP      (1 << 0)
     1449 +#define XFEATURE_SSE            (1 << 1)
     1450 +#define XFEATURE_AVX            (1 << 2)
     1451 +/*
     1452 + * MPX is meant to be all or nothing, therefore for most of the kernel use the
     1453 + * following definition.
     1454 + */
     1455 +#define XFEATURE_MPX_BNDREGS    (1 << 3)
     1456 +#define XFEATURE_MPX_BNDCSR     (1 << 4)
     1457 +#define XFEATURE_MPX            (XFEATURE_MPX_BNDREGS | XFEATURE_MPX_BNDCSR)
     1458 +/*
     1459 + * AVX512 is meant to be all or nothing, therefore for most of the kernel use
     1460 + * the following definition.
     1461 + */
     1462 +#define XFEATURE_AVX512_OPMASK  (1 << 5)
     1463 +#define XFEATURE_AVX512_ZMM     (1 << 6)
     1464 +#define XFEATURE_AVX512_HI_ZMM  (1 << 7)
     1465 +#define XFEATURE_AVX512         (XFEATURE_AVX512_OPMASK | \
     1466 +        XFEATURE_AVX512_ZMM | XFEATURE_AVX512_HI_ZMM)
1453 1467          /* bit 8 unused */
1454      -#define XFEATURE_PKRU           0x200
     1468 +#define XFEATURE_PKRU           (1 << 9)
1455 1469  #define XFEATURE_FP_ALL \
1456 1470          (XFEATURE_LEGACY_FP | XFEATURE_SSE | XFEATURE_AVX | XFEATURE_MPX | \
1457 1471          XFEATURE_AVX512 | XFEATURE_PKRU)
1458 1472  
1459 1473  /*
1460 1474   * Define the set of xfeature flags that should be considered valid in the xsave
1461 1475   * state vector when we initialize an lwp. This is distinct from the full set so
1462 1476   * that all of the processor's normal logic and tracking of the xsave state is
1463 1477   * usable. This should correspond to the state that's been initialized by the
1464 1478   * ABI to hold meaningful values. Adding additional bits here can have serious
1465 1479   * performance implications and cause performance degradations when using the
1466 1480   * FPU vector (xmm) registers.
1467 1481   */
1468 1482  #define XFEATURE_FP_INITIAL     (XFEATURE_LEGACY_FP | XFEATURE_SSE)
1469 1483  
1470 1484  #if !defined(_ASM)
1471 1485  
1472 1486  #if defined(_KERNEL) || defined(_KMEMUSER)
1473 1487  
1474 1488  #define NUM_X86_FEATURES        109
1475 1489  extern uchar_t x86_featureset[];
1476 1490  
1477 1491  extern void free_x86_featureset(void *featureset);
1478 1492  extern boolean_t is_x86_feature(void *featureset, uint_t feature);
1479 1493  extern void add_x86_feature(void *featureset, uint_t feature);
1480 1494  extern void remove_x86_feature(void *featureset, uint_t feature);
1481 1495  extern boolean_t compare_x86_featureset(void *setA, void *setB);
1482 1496  extern void print_x86_featureset(void *featureset);
1483 1497  
1484 1498  
1485 1499  extern uint_t x86_type;
1486 1500  extern uint_t x86_vendor;
1487 1501  extern uint_t x86_clflush_size;
1488 1502  
1489 1503  extern uint_t pentiumpro_bug4046376;
1490 1504  
1491 1505  /*
1492 1506   * These functions are all used to perform various side-channel mitigations.
1493 1507   * Please see uts/i86pc/os/cpuid.c for more information.
1494 1508   */
1495 1509  extern void (*spec_uarch_flush)(void);
1496 1510  extern void x86_rsb_stuff(void);
1497 1511  extern void x86_md_clear(void);
1498 1512  
1499 1513  #endif
1500 1514  
1501 1515  #if defined(_KERNEL)
1502 1516  
1503 1517  /*
1504 1518   * This structure is used to pass arguments and get return values back
1505 1519   * from the CPUID instruction in __cpuid_insn() routine.
1506 1520   */
struct cpuid_regs {
	uint32_t	cp_eax;		/* %eax: leaf/function in, result out */
	uint32_t	cp_ebx;		/* %ebx result */
	uint32_t	cp_ecx;		/* %ecx: subleaf in (where used), result out */
	uint32_t	cp_edx;		/* %edx result */
};
1513 1527  
1514 1528  extern int x86_use_pcid;
1515 1529  extern int x86_use_invpcid;
1516 1530  
1517 1531  /*
1518 1532   * Utility functions to get/set extended control registers (XCR)
1519 1533   * Initial use is to get/set the contents of the XFEATURE_ENABLED_MASK.
1520 1534   */
1521 1535  extern uint64_t get_xcr(uint_t);
1522 1536  extern void set_xcr(uint_t, uint64_t);
1523 1537  
1524 1538  extern uint64_t rdmsr(uint_t);
1525 1539  extern void wrmsr(uint_t, const uint64_t);
1526 1540  extern uint64_t xrdmsr(uint_t);
1527 1541  extern void xwrmsr(uint_t, const uint64_t);
1528 1542  extern int checked_rdmsr(uint_t, uint64_t *);
1529 1543  extern int checked_wrmsr(uint_t, uint64_t);
1530 1544  
1531 1545  extern void invalidate_cache(void);
1532 1546  extern ulong_t getcr4(void);
1533 1547  extern void setcr4(ulong_t);
1534 1548  
1535 1549  extern void mtrr_sync(void);
1536 1550  
1537 1551  extern void cpu_fast_syscall_enable(void);
1538 1552  extern void cpu_fast_syscall_disable(void);
1539 1553  
/*
 * Named passes of CPUID-based processor identification and feature setup.
 * cpuid_checkpass()/cpuid_execpass() below take one of these; presumably the
 * passes execute in declaration order — see uts/i86pc/os/cpuid.c to confirm.
 */
typedef enum cpuid_pass {
	CPUID_PASS_NONE = 0,
	CPUID_PASS_PRELUDE,
	CPUID_PASS_IDENT,
	CPUID_PASS_BASIC,
	CPUID_PASS_EXTENDED,
	CPUID_PASS_DYNAMIC,
	CPUID_PASS_RESOLVE
} cpuid_pass_t;
1549 1563  
1550 1564  struct cpu;
1551 1565  
1552 1566  extern boolean_t cpuid_checkpass(const struct cpu *const, const cpuid_pass_t);
1553 1567  extern void cpuid_execpass(struct cpu *, const cpuid_pass_t, void *);
1554 1568  extern void cpuid_pass_ucode(struct cpu *, uchar_t *);
1555 1569  extern uint32_t cpuid_insn(struct cpu *, struct cpuid_regs *);
1556 1570  extern uint32_t __cpuid_insn(struct cpuid_regs *);
1557 1571  extern int cpuid_getbrandstr(struct cpu *, char *, size_t);
1558 1572  extern int cpuid_getidstr(struct cpu *, char *, size_t);
1559 1573  extern const char *cpuid_getvendorstr(struct cpu *);
1560 1574  extern uint_t cpuid_getvendor(struct cpu *);
1561 1575  extern uint_t cpuid_getfamily(struct cpu *);
1562 1576  extern uint_t cpuid_getmodel(struct cpu *);
1563 1577  extern uint_t cpuid_getstep(struct cpu *);
1564 1578  extern uint_t cpuid_getsig(struct cpu *);
1565 1579  extern uint_t cpuid_get_ncpu_per_chip(struct cpu *);
1566 1580  extern uint_t cpuid_get_ncore_per_chip(struct cpu *);
1567 1581  extern uint_t cpuid_get_ncpu_sharing_last_cache(struct cpu *);
1568 1582  extern id_t cpuid_get_last_lvl_cacheid(struct cpu *);
  
    | 
      ↓ open down ↓ | 
    104 lines elided | 
    
      ↑ open up ↑ | 
  
1569 1583  extern int cpuid_get_chipid(struct cpu *);
1570 1584  extern id_t cpuid_get_coreid(struct cpu *);
1571 1585  extern int cpuid_get_pkgcoreid(struct cpu *);
1572 1586  extern int cpuid_get_clogid(struct cpu *);
1573 1587  extern int cpuid_get_cacheid(struct cpu *);
1574 1588  extern uint32_t cpuid_get_apicid(struct cpu *);
1575 1589  extern uint_t cpuid_get_procnodeid(struct cpu *cpu);
1576 1590  extern uint_t cpuid_get_procnodes_per_pkg(struct cpu *cpu);
1577 1591  extern uint_t cpuid_get_compunitid(struct cpu *cpu);
1578 1592  extern uint_t cpuid_get_cores_per_compunit(struct cpu *cpu);
1579      -extern size_t cpuid_get_xsave_size();
1580      -extern boolean_t cpuid_need_fp_excp_handling();
     1593 +extern size_t cpuid_get_xsave_size(void);
     1594 +extern void cpuid_get_xsave_info(uint64_t, size_t *, size_t *);
     1595 +extern boolean_t cpuid_need_fp_excp_handling(void);
1581 1596  extern int cpuid_is_cmt(struct cpu *);
1582 1597  extern int cpuid_syscall32_insn(struct cpu *);
1583 1598  extern int getl2cacheinfo(struct cpu *, int *, int *, int *);
1584 1599  
1585 1600  extern x86_chiprev_t cpuid_getchiprev(struct cpu *);
1586 1601  extern const char *cpuid_getchiprevstr(struct cpu *);
1587 1602  extern uint32_t cpuid_getsockettype(struct cpu *);
1588 1603  extern const char *cpuid_getsocketstr(struct cpu *);
1589 1604  extern x86_uarchrev_t cpuid_getuarchrev(struct cpu *);
1590 1605  
1591 1606  extern int cpuid_opteron_erratum(struct cpu *, uint_t);
1592 1607  
1593 1608  struct cpuid_info;
1594 1609  
1595 1610  extern void setx86isalist(void);
1596 1611  extern void cpuid_alloc_space(struct cpu *);
1597 1612  extern void cpuid_free_space(struct cpu *);
1598 1613  extern void cpuid_set_cpu_properties(void *, processorid_t,
1599 1614      struct cpuid_info *);
1600 1615  extern void cpuid_post_ucodeadm(void);
1601 1616  
1602 1617  extern void cpuid_get_addrsize(struct cpu *, uint_t *, uint_t *);
1603 1618  extern uint_t cpuid_get_dtlb_nent(struct cpu *, size_t);
1604 1619  
1605 1620  #if !defined(__xpv)
1606 1621  extern uint32_t *cpuid_mwait_alloc(struct cpu *);
1607 1622  extern void cpuid_mwait_free(struct cpu *);
1608 1623  extern int cpuid_deep_cstates_supported(void);
1609 1624  extern int cpuid_arat_supported(void);
1610 1625  extern int cpuid_iepb_supported(struct cpu *);
1611 1626  extern int cpuid_deadline_tsc_supported(void);
1612 1627  extern void vmware_port(int, uint32_t *);
1613 1628  #endif
1614 1629  
1615 1630  extern x86_processor_family_t chiprev_family(const x86_chiprev_t);
1616 1631  extern boolean_t chiprev_matches(const x86_chiprev_t, const x86_chiprev_t);
1617 1632  extern boolean_t chiprev_at_least(const x86_chiprev_t, const x86_chiprev_t);
1618 1633  
1619 1634  extern x86_uarch_t uarchrev_uarch(const x86_uarchrev_t);
1620 1635  extern boolean_t uarchrev_matches(const x86_uarchrev_t, const x86_uarchrev_t);
1621 1636  extern boolean_t uarchrev_at_least(const x86_uarchrev_t, const x86_uarchrev_t);
1622 1637  
struct cpu_ucode_info;

/* Microcode (ucode) per-CPU state and update interfaces. */
extern void ucode_alloc_space(struct cpu *);
extern void ucode_free_space(struct cpu *);
extern void ucode_check(struct cpu *);
/*
 * Use a full prototype: an empty parameter list declares a function with
 * unspecified arguments (obsolescent in C).  This matches the (void)
 * cleanups made elsewhere in this header (e.g. cpuid_get_xsave_size(void)).
 */
extern void ucode_cleanup(void);
1629 1644  
1630 1645  #if !defined(__xpv)
1631 1646  extern  char _tsc_mfence_start;
1632 1647  extern  char _tsc_mfence_end;
1633 1648  extern  char _tscp_start;
1634 1649  extern  char _tscp_end;
1635 1650  extern  char _no_rdtsc_start;
1636 1651  extern  char _no_rdtsc_end;
1637 1652  extern  char _tsc_lfence_start;
1638 1653  extern  char _tsc_lfence_end;
1639 1654  #endif
1640 1655  
1641 1656  #if !defined(__xpv)
1642 1657  extern  char bcopy_patch_start;
1643 1658  extern  char bcopy_patch_end;
1644 1659  extern  char bcopy_ck_size;
1645 1660  #endif
1646 1661  
1647 1662  extern void post_startup_cpu_fixups(void);
1648 1663  
1649 1664  extern uint_t workaround_errata(struct cpu *);
1650 1665  
1651 1666  #if defined(OPTERON_ERRATUM_93)
1652 1667  extern int opteron_erratum_93;
1653 1668  #endif
1654 1669  
1655 1670  #if defined(OPTERON_ERRATUM_91)
1656 1671  extern int opteron_erratum_91;
1657 1672  #endif
1658 1673  
1659 1674  #if defined(OPTERON_ERRATUM_100)
1660 1675  extern int opteron_erratum_100;
1661 1676  #endif
1662 1677  
1663 1678  #if defined(OPTERON_ERRATUM_121)
1664 1679  extern int opteron_erratum_121;
1665 1680  #endif
1666 1681  
1667 1682  #if defined(OPTERON_ERRATUM_147)
1668 1683  extern int opteron_erratum_147;
1669 1684  extern void patch_erratum_147(void);
1670 1685  #endif
1671 1686  
1672 1687  #if !defined(__xpv)
1673 1688  extern void determine_platform(void);
1674 1689  #endif
1675 1690  extern int get_hwenv(void);
1676 1691  extern int is_controldom(void);
1677 1692  
1678 1693  extern void enable_pcid(void);
1679 1694  
1680 1695  extern void xsave_setup_msr(struct cpu *);
1681 1696  
1682 1697  #if !defined(__xpv)
1683 1698  extern void reset_gdtr_limit(void);
1684 1699  #endif
1685 1700  
1686 1701  extern int enable_platform_detection;
1687 1702  
1688 1703  /*
1689 1704   * Hypervisor signatures
1690 1705   */
1691 1706  #define HVSIG_XEN_HVM   "XenVMMXenVMM"
1692 1707  #define HVSIG_VMWARE    "VMwareVMware"
1693 1708  #define HVSIG_KVM       "KVMKVMKVM"
1694 1709  #define HVSIG_MICROSOFT "Microsoft Hv"
1695 1710  #define HVSIG_BHYVE     "bhyve bhyve "
1696 1711  
1697 1712  /*
1698 1713   * Defined hardware environments
1699 1714   */
1700 1715  #define HW_NATIVE       (1 << 0)        /* Running on bare metal */
1701 1716  #define HW_XEN_PV       (1 << 1)        /* Running on Xen PVM */
1702 1717  
1703 1718  #define HW_XEN_HVM      (1 << 2)        /* Running on Xen HVM */
1704 1719  #define HW_VMWARE       (1 << 3)        /* Running on VMware hypervisor */
1705 1720  #define HW_KVM          (1 << 4)        /* Running on KVM hypervisor */
1706 1721  #define HW_MICROSOFT    (1 << 5)        /* Running on Microsoft hypervisor */
1707 1722  #define HW_BHYVE        (1 << 6)        /* Running on bhyve hypervisor */
1708 1723  
1709 1724  #define HW_VIRTUAL      (HW_XEN_HVM | HW_VMWARE | HW_KVM | HW_MICROSOFT | \
1710 1725              HW_BHYVE)
1711 1726  
1712 1727  #endif  /* _KERNEL */
1713 1728  
1714 1729  #endif  /* !_ASM */
1715 1730  
1716 1731  /*
1717 1732   * VMware hypervisor related defines
1718 1733   */
1719 1734  #define VMWARE_HVMAGIC          0x564d5868
1720 1735  #define VMWARE_HVPORT           0x5658
1721 1736  #define VMWARE_HVCMD_GETVERSION 0x0a
1722 1737  #define VMWARE_HVCMD_GETTSCFREQ 0x2d
1723 1738  
1724 1739  #ifdef  __cplusplus
1725 1740  }
1726 1741  #endif
1727 1742  
1728 1743  #endif  /* _SYS_X86_ARCHEXT_H */
  
    | 
      ↓ open down ↓ | 
    138 lines elided | 
    
      ↑ open up ↑ | 
  
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX