Print this page
16413 Post-barrier Return Stack Buffer (consider no-eIBRS cases)
   1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
   13  * Copyright 2019 Joyent, Inc.
   14  */
  15 
  16         .file   "retpoline.s"
  17 
  18 /*
  19  * This file implements the various hooks that are needed for retpolines and
  20  * return stack buffer (RSB) stuffing. For more information, please see the
  21  * 'Speculative Execution CPU Side Channel Security' section of the
  22  * uts/i86pc/os/cpuid.c big theory statement.
  23  */
  24 
  25 #include <sys/asm_linkage.h>
  26 #include <sys/x86_archext.h>
  27 
  28 #if defined(__amd64)
  29 
  30 /*
  31  * This macro generates the default retpoline entry point that the compiler
  32  * expects. It implements the expected retpoline form.
  33  */


 102         RETPOLINE_MKGENERIC(r14)
 103         RETPOLINE_MKGENERIC(r15)
 104
        /*
         * Instantiate the RETPOLINE_MKJUMP thunk for every general-purpose
         * register except %rsp.  NOTE(review): the macro itself is defined
         * earlier in the file, outside this excerpt, so its exact expansion
         * cannot be confirmed here.
         */
 105         RETPOLINE_MKJUMP(rax)
 106         RETPOLINE_MKJUMP(rbx)
 107         RETPOLINE_MKJUMP(rcx)
 108         RETPOLINE_MKJUMP(rdx)
 109         RETPOLINE_MKJUMP(rdi)
 110         RETPOLINE_MKJUMP(rsi)
 111         RETPOLINE_MKJUMP(rbp)
 112         RETPOLINE_MKJUMP(r8)
 113         RETPOLINE_MKJUMP(r9)
 114         RETPOLINE_MKJUMP(r10)
 115         RETPOLINE_MKJUMP(r11)
 116         RETPOLINE_MKJUMP(r12)
 117         RETPOLINE_MKJUMP(r13)
 118         RETPOLINE_MKJUMP(r14)
 119         RETPOLINE_MKJUMP(r15)
 120 
 121         /*
 122          * The x86_rsb_stuff function is called from pretty arbitrary
 123          * contexts. It's much easier for us to save and restore all the
 124          * registers we touch rather than clobber them for callers. You must
 125          * preserve this property or the system will panic at best.
 126          */
 127         ENTRY(x86_rsb_stuff)
 128         /*
 129          * These nops are present so we can patch a ret instruction if we need
 130          * to disable RSB stuffing because enhanced IBRS is present or we're
 131          * disabling mitigations.
 132          */
 133         nop

 134         nop
        /*
         * Preserve the only two registers we touch (per the contract stated
         * in the comment above this function): %edi is the loop counter and
         * %rax holds the pre-loop stack pointer.
         */
 135         pushq   %rdi
 136         pushq   %rax
 137         movl    $16, %edi
 138         movq    %rsp, %rax
        /*
         * Each iteration performs two calls, so 16 iterations push 32
         * return addresses into the return stack buffer (RSB).  Every call
         * targets the 2: label immediately past its own speculation trap,
         * so architectural execution simply falls through; the skipped
         * "1: pause; call 1b" bodies are reachable only via a mispredicted
         * speculative return, which they capture in a tight loop.
         */
 139 rsb_loop:
 140         call    2f
 141 1:
 142         pause
 143         call    1b
 144 2:
 145         call    2f
 146 1:
 147         pause
 148         call    1b
 149 2:
 150         subl    $1, %edi
 151         jnz     rsb_loop
        /*
         * Discard the 32 return addresses the calls pushed by restoring the
         * saved stack pointer, then restore the caller's registers.
         */
 152         movq    %rax, %rsp
 153         popq    %rax
 154         popq    %rdi
 155         ret
 156         SET_SIZE(x86_rsb_stuff)

 157 
 158 #elif defined(__i386)
 159 
 160 /*
 161  * While the kernel is 64-bit only, dboot is still 32-bit, so there are a
 162  * limited number of variants that are used for 32-bit. However as dboot is
 163  * short lived and uses them sparingly, we only do the full variant and do not
 164  * have an AMD specific version.
 165  */
 166 
 167 #define RETPOLINE_MKTHUNK(reg) \
 168         ENTRY(__x86_indirect_thunk_##reg)       \
 169         call    2f;                             \
 170 1:                                              \
 171         pause;                                  \
 172         lfence;                                 \
 173         jmp     1b;                             \
 174 2:                                              \
 175         movl    %##reg, (%esp);         \
 176         ret;                                    \
   1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2019 Joyent, Inc.
  14  * Copyright 2024 MNX Cloud, Inc.
  15  */
  16 
  17         .file   "retpoline.s"
  18 
  19 /*
  20  * This file implements the various hooks that are needed for retpolines and
  21  * return stack buffer (RSB) stuffing. For more information, please see the
  22  * 'Speculative Execution CPU Side Channel Security' section of the
  23  * uts/i86pc/os/cpuid.c big theory statement.
  24  */
  25 
  26 #include <sys/asm_linkage.h>
  27 #include <sys/x86_archext.h>
  28 
  29 #if defined(__amd64)
  30 
  31 /*
  32  * This macro generates the default retpoline entry point that the compiler
  33  * expects. It implements the expected retpoline form.
  34  */


 103         RETPOLINE_MKGENERIC(r14)
 104         RETPOLINE_MKGENERIC(r15)
 105
        /*
         * Instantiate the RETPOLINE_MKJUMP thunk for every general-purpose
         * register except %rsp.  NOTE(review): the macro itself is defined
         * earlier in the file, outside this excerpt, so its exact expansion
         * cannot be confirmed here.
         */
 106         RETPOLINE_MKJUMP(rax)
 107         RETPOLINE_MKJUMP(rbx)
 108         RETPOLINE_MKJUMP(rcx)
 109         RETPOLINE_MKJUMP(rdx)
 110         RETPOLINE_MKJUMP(rdi)
 111         RETPOLINE_MKJUMP(rsi)
 112         RETPOLINE_MKJUMP(rbp)
 113         RETPOLINE_MKJUMP(r8)
 114         RETPOLINE_MKJUMP(r9)
 115         RETPOLINE_MKJUMP(r10)
 116         RETPOLINE_MKJUMP(r11)
 117         RETPOLINE_MKJUMP(r12)
 118         RETPOLINE_MKJUMP(r13)
 119         RETPOLINE_MKJUMP(r14)
 120         RETPOLINE_MKJUMP(r15)
 121 
 122         /*
 123          * The x86_rsb_stuff{,_vmexit} functions can be called from pretty
 124          * arbitrary contexts. It's much easier for us to save and restore all
 125          * the registers we touch rather than clobber them for callers. You
 126          * must preserve this property or the system will panic at best.
 127          *
 128          * The two entry points are because the need to RSB stuff on Intel
 129          * depends greatly on factors that are different in the VMEXIT case,
 130          * vs. the other switching cases.  See cpuid.c's cpuid_patch_rsb()
 131          * for details.
 132          */
 133         ENTRY(x86_rsb_stuff_vmexit)
        /*
         * This nop (and the one following the ALTENTRY below) are present
         * so a ret instruction can be patched over the corresponding entry
         * point if its flavor of RSB stuffing must be disabled -- see
         * cpuid.c's cpuid_patch_rsb(), referenced in the comment above.
         * Having a separate nop ahead of the ALTENTRY lets the vmexit
         * entry point be disabled independently of the common one.
         */
 134         nop
 135         ALTENTRY(x86_rsb_stuff)
 136         nop
        /*
         * Preserve the only two registers we touch (per the contract stated
         * in the comment above this function): %edi is the loop counter and
         * %rax holds the pre-loop stack pointer.
         */
 137         pushq   %rdi
 138         pushq   %rax
 139         movl    $16, %edi
 140         movq    %rsp, %rax
        /*
         * Each iteration performs two calls, so 16 iterations push 32
         * return addresses into the return stack buffer (RSB).  Every call
         * targets the 2: label immediately past its own speculation trap,
         * so architectural execution simply falls through; the skipped
         * "1: pause; call 1b" bodies are reachable only via a mispredicted
         * speculative return, which they capture in a tight loop.
         */
 141 rsb_loop:
 142         call    2f
 143 1:
 144         pause
 145         call    1b
 146 2:
 147         call    2f
 148 1:
 149         pause
 150         call    1b
 151 2:
 152         subl    $1, %edi
 153         jnz     rsb_loop
        /*
         * Discard the 32 return addresses the calls pushed by restoring the
         * saved stack pointer, then restore the caller's registers.
         */
 154         movq    %rax, %rsp
 155         popq    %rax
 156         popq    %rdi
 157         ret
 158         SET_SIZE(x86_rsb_stuff)
 159         SET_SIZE(x86_rsb_stuff_vmexit)
 160 
 161 #elif defined(__i386)
 162 
 163 /*
 164  * While the kernel is 64-bit only, dboot is still 32-bit, so there are a
 165  * limited number of variants that are used for 32-bit. However as dboot is
 166  * short lived and uses them sparingly, we only do the full variant and do not
 167  * have an AMD specific version.
 168  */
 169 
 170 #define RETPOLINE_MKTHUNK(reg) \
 171         ENTRY(__x86_indirect_thunk_##reg)       \
 172         call    2f;                             \
 173 1:                                              \
 174         pause;                                  \
 175         lfence;                                 \
 176         jmp     1b;                             \
 177 2:                                              \
 178         movl    %##reg, (%esp);         \
 179         ret;                                    \