1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright 2019 Joyent, Inc.
  14  */
  15 
  16         .file   "retpoline.s"
  17 
  18 /*
  19  * This file implements the various hooks that are needed for retpolines and
  20  * return stack buffer (RSB) stuffing. For more information, please see the
  21  * 'Speculative Execution CPU Side Channel Security' section of the
  22  * uts/i86pc/os/cpuid.c big theory statement.
  23  */
  24 
  25 #include <sys/asm_linkage.h>
  26 #include <sys/x86_archext.h>
  27 
  28 #if defined(__amd64)
  29 
  30 /*
  31  * This macro generates the default retpoline entry point that the compiler
  32  * expects. It implements the expected retpoline form.
  33  */
#define RETPOLINE_MKTHUNK(reg) \
        ENTRY(__x86_indirect_thunk_##reg)       \
        call    2f;     /* push 1b (the trap below) as return addr and RSB entry */ \
1:                                              \
        pause;          /* speculation trap: only ever reached speculatively */ \
        lfence;                                 \
        jmp     1b;     /* spin until the mispredicted speculation is squashed */ \
2:                                              \
        movq    %##reg, (%rsp);         /* overwrite return addr with real target */ \
        ret;            /* "returns" to the target without an indirect branch */ \
        SET_SIZE(__x86_indirect_thunk_##reg)
  45 
  46 /*
  47  * This macro generates the default retpoline form. It exists in addition to the
  48  * thunk so if we need to restore the default retpoline behavior to the thunk
  49  * we can.
  50  */
#define RETPOLINE_MKGENERIC(reg) \
        ENTRY(__x86_indirect_thunk_gen_##reg)   \
        call    2f;     /* push 1b (the trap below) as return addr and RSB entry */ \
1:                                              \
        pause;          /* speculation trap: only ever reached speculatively */ \
        lfence;                                 \
        jmp     1b;     /* spin until the mispredicted speculation is squashed */ \
2:                                              \
        movq    %##reg, (%rsp);         /* overwrite return addr with real target */ \
        ret;            /* "returns" to the target without an indirect branch */ \
        SET_SIZE(__x86_indirect_thunk_gen_##reg)
  62 
  63 /*
  64  * This macro generates the no-op form of the retpoline which will be used if we
  65  * either need to disable retpolines because we have enhanced IBRS or because we
  66  * have been asked to disable mitigations.
  67  */
#define RETPOLINE_MKJUMP(reg)                   \
        ENTRY(__x86_indirect_thunk_jmp_##reg)   \
        jmp     *%##reg;        /* plain indirect jump: used to patch out the retpoline */ \
        SET_SIZE(__x86_indirect_thunk_jmp_##reg)
  72 
        /*
         * Default (compiler-expected) thunks, one per general-purpose
         * register a compiler may use for an indirect branch. %rsp is
         * deliberately absent; a thunk through it could not work since
         * the sequence itself uses the stack.
         */
        RETPOLINE_MKTHUNK(rax)
        RETPOLINE_MKTHUNK(rbx)
        RETPOLINE_MKTHUNK(rcx)
        RETPOLINE_MKTHUNK(rdx)
        RETPOLINE_MKTHUNK(rdi)
        RETPOLINE_MKTHUNK(rsi)
        RETPOLINE_MKTHUNK(rbp)
        RETPOLINE_MKTHUNK(r8)
        RETPOLINE_MKTHUNK(r9)
        RETPOLINE_MKTHUNK(r10)
        RETPOLINE_MKTHUNK(r11)
        RETPOLINE_MKTHUNK(r12)
        RETPOLINE_MKTHUNK(r13)
        RETPOLINE_MKTHUNK(r14)
        RETPOLINE_MKTHUNK(r15)

        /*
         * Preserved copies of the generic retpoline form, so the default
         * thunks above can be restored after having been patched.
         */
        RETPOLINE_MKGENERIC(rax)
        RETPOLINE_MKGENERIC(rbx)
        RETPOLINE_MKGENERIC(rcx)
        RETPOLINE_MKGENERIC(rdx)
        RETPOLINE_MKGENERIC(rdi)
        RETPOLINE_MKGENERIC(rsi)
        RETPOLINE_MKGENERIC(rbp)
        RETPOLINE_MKGENERIC(r8)
        RETPOLINE_MKGENERIC(r9)
        RETPOLINE_MKGENERIC(r10)
        RETPOLINE_MKGENERIC(r11)
        RETPOLINE_MKGENERIC(r12)
        RETPOLINE_MKGENERIC(r13)
        RETPOLINE_MKGENERIC(r14)
        RETPOLINE_MKGENERIC(r15)

        /*
         * No-op (plain indirect jmp) forms, used to replace the thunks
         * when retpolines are disabled (enhanced IBRS, or mitigations off).
         */
        RETPOLINE_MKJUMP(rax)
        RETPOLINE_MKJUMP(rbx)
        RETPOLINE_MKJUMP(rcx)
        RETPOLINE_MKJUMP(rdx)
        RETPOLINE_MKJUMP(rdi)
        RETPOLINE_MKJUMP(rsi)
        RETPOLINE_MKJUMP(rbp)
        RETPOLINE_MKJUMP(r8)
        RETPOLINE_MKJUMP(r9)
        RETPOLINE_MKJUMP(r10)
        RETPOLINE_MKJUMP(r11)
        RETPOLINE_MKJUMP(r12)
        RETPOLINE_MKJUMP(r13)
        RETPOLINE_MKJUMP(r14)
        RETPOLINE_MKJUMP(r15)
 120 
 121         /*
 122          * The x86_rsb_stuff function is called from pretty arbitrary
 123          * contexts. It's much easier for us to save and restore all the
 124          * registers we touch rather than clobber them for callers. You must
 125          * preserve this property or the system will panic at best.
 126          */
        ENTRY(x86_rsb_stuff)
        /*
         * These nops are present so we can patch a ret instruction if we need
         * to disable RSB stuffing because enhanced IBRS is present or we're
         * disabling mitigations.
         */
        nop
        nop
        pushq   %rdi                    /* scratch: loop counter */
        pushq   %rax                    /* scratch: saved stack pointer */
        movl    $16, %edi               /* 16 iterations x 2 calls = 32 RSB entries */
        movq    %rsp, %rax              /* remember %rsp; the calls below grow the stack */
rsb_loop:
        /*
         * Each "call 2f" pushes one entry onto the RSB whose target is the
         * following "1:" speculation trap; the trap is never reached
         * architecturally since the call skips over it. Two per iteration.
         */
        call    2f
1:
        pause
        call    1b                      /* speculation trap; architecturally dead */
2:
        call    2f
1:
        pause
        call    1b                      /* speculation trap; architecturally dead */
2:
        subl    $1, %edi
        jnz     rsb_loop
        movq    %rax, %rsp              /* discard the 32 return addresses pushed above */
        popq    %rax
        popq    %rdi
        ret
        SET_SIZE(x86_rsb_stuff)
 157 
 158 #elif defined(__i386)
 159 
 160 /*
 161  * While the kernel is 64-bit only, dboot is still 32-bit, so there are a
 162  * limited number of variants that are used for 32-bit. However as dboot is
 163  * short lived and uses them sparingly, we only do the full variant and do not
 164  * have an AMD specific version.
 165  */
 166 
#define RETPOLINE_MKTHUNK(reg) \
        ENTRY(__x86_indirect_thunk_##reg)       \
        call    2f;     /* push 1b (the trap below) as return addr and RSB entry */ \
1:                                              \
        pause;          /* speculation trap: only ever reached speculatively */ \
        lfence;                                 \
        jmp     1b;     /* spin until the mispredicted speculation is squashed */ \
2:                                              \
        movl    %##reg, (%esp);         /* overwrite return addr with real target */ \
        ret;            /* "returns" to the target without an indirect branch */ \
        SET_SIZE(__x86_indirect_thunk_##reg)

        /* Only the registers dboot actually uses for indirect branches. */
        RETPOLINE_MKTHUNK(edi)
        RETPOLINE_MKTHUNK(eax)
 181 
 182 #else
 183 #error  "Your architecture is in another castle."
 184 #endif