16413 Post-barrier Return Stack Buffer (consider no-eIBRS cases)
--- old/usr/src/uts/intel/ml/retpoline.S
+++ new/usr/src/uts/intel/ml/retpoline.S
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2019 Joyent, Inc.
14 + * Copyright 2024 MNX Cloud, Inc.
14 15 */
15 16
16 17 .file "retpoline.s"
17 18
18 19 /*
19 20 * This file implements the various hooks that are needed for retpolines and
20 21 * return stack buffer (RSB) stuffing. For more information, please see the
21 22 * 'Speculative Execution CPU Side Channel Security' section of the
22 23 * uts/i86pc/os/cpuid.c big theory statement.
23 24 */
24 25
25 26 #include <sys/asm_linkage.h>
26 27 #include <sys/x86_archext.h>
27 28
28 29 #if defined(__amd64)
29 30
30 31 /*
31 32 * This macro generates the default retpoline entry point that the compiler
32 33 * expects. It implements the expected retpoline form.
33 34 */
34 35 #define RETPOLINE_MKTHUNK(reg) \
35 36 ENTRY(__x86_indirect_thunk_##reg) \
36 37 call 2f; \
37 38 1: \
38 39 pause; \
39 40 lfence; \
40 41 jmp 1b; \
41 42 2: \
42 43 movq %##reg, (%rsp); \
43 44 ret; \
44 45 SET_SIZE(__x86_indirect_thunk_##reg)
45 46
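
For reference, RETPOLINE_MKTHUNK(rax) expands to roughly the following
(an illustrative expansion of the macro above, not part of the change):

	ENTRY(__x86_indirect_thunk_rax)
		call	2f		/* push 1b as the return address, jump to 2f */
	1:
		pause			/* speculation trap: a predicted return lands */
		lfence			/* here and spins harmlessly until resolved */
		jmp	1b
	2:
		movq	%rax, (%rsp)	/* replace the return address with the real */
		ret			/* branch target, then "return" to it */
	SET_SIZE(__x86_indirect_thunk_rax)

The call/ret pair routes the indirect branch through the return
predictor rather than the indirect branch predictor, which is what
defeats branch target injection (Spectre v2).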
46 47 /*
47 48 * This macro generates the default retpoline form. It exists in addition to the
48 49 * thunk so if we need to restore the default retpoline behavior to the thunk
49 50 * we can.
50 51 */
51 52 #define RETPOLINE_MKGENERIC(reg) \
52 53 ENTRY(__x86_indirect_thunk_gen_##reg) \
53 54 call 2f; \
54 55 1: \
55 56 pause; \
56 57 lfence; \
57 58 jmp 1b; \
58 59 2: \
59 60 movq %##reg, (%rsp); \
60 61 ret; \
61 62 SET_SIZE(__x86_indirect_thunk_gen_##reg)
62 63
63 64 /*
64 65 * This macro generates the no-op form of the retpoline which will be used if we
65 66 * either need to disable retpolines because we have enhanced IBRS or because we
66 67 * have been asked to disable mitigations.
67 68 */
68 69 #define RETPOLINE_MKJUMP(reg) \
69 70 ENTRY(__x86_indirect_thunk_jmp_##reg) \
70 71 jmp *%##reg; \
71 72 SET_SIZE(__x86_indirect_thunk_jmp_##reg)
72 73
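As a contrast with the full form above, RETPOLINE_MKJUMP(rax) reduces
the thunk to a single plain indirect jump (again an illustrative
expansion):

	ENTRY(__x86_indirect_thunk_jmp_rax)
		jmp	*%rax		/* plain indirect jump, no retpoline */
	SET_SIZE(__x86_indirect_thunk_jmp_rax)
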
73 74 RETPOLINE_MKTHUNK(rax)
74 75 RETPOLINE_MKTHUNK(rbx)
75 76 RETPOLINE_MKTHUNK(rcx)
76 77 RETPOLINE_MKTHUNK(rdx)
77 78 RETPOLINE_MKTHUNK(rdi)
78 79 RETPOLINE_MKTHUNK(rsi)
79 80 RETPOLINE_MKTHUNK(rbp)
80 81 RETPOLINE_MKTHUNK(r8)
81 82 RETPOLINE_MKTHUNK(r9)
82 83 RETPOLINE_MKTHUNK(r10)
83 84 RETPOLINE_MKTHUNK(r11)
84 85 RETPOLINE_MKTHUNK(r12)
85 86 RETPOLINE_MKTHUNK(r13)
86 87 RETPOLINE_MKTHUNK(r14)
87 88 RETPOLINE_MKTHUNK(r15)
88 89
89 90 RETPOLINE_MKGENERIC(rax)
90 91 RETPOLINE_MKGENERIC(rbx)
91 92 RETPOLINE_MKGENERIC(rcx)
92 93 RETPOLINE_MKGENERIC(rdx)
93 94 RETPOLINE_MKGENERIC(rdi)
94 95 RETPOLINE_MKGENERIC(rsi)
95 96 RETPOLINE_MKGENERIC(rbp)
96 97 RETPOLINE_MKGENERIC(r8)
97 98 RETPOLINE_MKGENERIC(r9)
98 99 RETPOLINE_MKGENERIC(r10)
99 100 RETPOLINE_MKGENERIC(r11)
100 101 RETPOLINE_MKGENERIC(r12)
101 102 RETPOLINE_MKGENERIC(r13)
102 103 RETPOLINE_MKGENERIC(r14)
103 104 RETPOLINE_MKGENERIC(r15)
104 105
105 106 RETPOLINE_MKJUMP(rax)
106 107 RETPOLINE_MKJUMP(rbx)
107 108 RETPOLINE_MKJUMP(rcx)
108 109 RETPOLINE_MKJUMP(rdx)
109 110 RETPOLINE_MKJUMP(rdi)
110 111 RETPOLINE_MKJUMP(rsi)
111 112 RETPOLINE_MKJUMP(rbp)
112 113 RETPOLINE_MKJUMP(r8)
113 114 RETPOLINE_MKJUMP(r9)
114 115 RETPOLINE_MKJUMP(r10)
115 116 RETPOLINE_MKJUMP(r11)
116 117 RETPOLINE_MKJUMP(r12)
117 118 RETPOLINE_MKJUMP(r13)
118 119 RETPOLINE_MKJUMP(r14)
119 120 RETPOLINE_MKJUMP(r15)
120 121
121 122 /*
122 - * The x86_rsb_stuff function is called from pretty arbitrary
123 - * contexts. It's much easier for us to save and restore all the
124 - * registers we touch rather than clobber them for callers. You must
125 - * preserve this property or the system will panic at best.
123 + * The x86_rsb_stuff{,_vmexit} functions can be called from pretty
124 + * arbitrary contexts. It's much easier for us to save and restore all
125 + * the registers we touch rather than clobber them for callers. You
126 + * must preserve this property or the system will panic at best.
127 + *
128 + * The two entry points are because the need to RSB stuff on Intel
129 + * depends greatly on factors that are different in the VMEXIT case,
130 + * vs. the other switching cases. See cpuid.c's cpuid_patch_rsb()
131 + * for details.
126 132 */
127 - ENTRY(x86_rsb_stuff)
128 - /*
129 - * These nops are present so we can patch a ret instruction if we need
130 - * to disable RSB stuffing because enhanced IBRS is present or we're
131 - * disabling mitigations.
132 - */
133 + ENTRY(x86_rsb_stuff_vmexit)
133 134 nop
135 + ALTENTRY(x86_rsb_stuff)
134 136 nop
135 137 pushq %rdi
136 138 pushq %rax
137 139 movl $16, %edi
138 140 movq %rsp, %rax
139 141 rsb_loop:
140 142 call 2f
141 143 1:
142 144 pause
143 145 call 1b
144 146 2:
145 147 call 2f
146 148 1:
147 149 pause
148 150 call 1b
149 151 2:
150 152 subl $1, %edi
151 153 jnz rsb_loop
152 154 movq %rax, %rsp
153 155 popq %rax
154 156 popq %rdi
155 157 ret
156 158 SET_SIZE(x86_rsb_stuff)
159 + SET_SIZE(x86_rsb_stuff_vmexit)
157 160
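A note on the mechanism, since the comment above covers the calling
contexts but not how the stuffing works: each executed call pushes an
entry onto the hardware return stack buffer as a side effect, and the
routine never executes a matching ret for them, so those predictions
are never consumed. An annotated sketch of the loop above (the same
code, with comments added):

		movl	$16, %edi	/* 16 iterations x 2 calls = 32 RSB entries */
		movq	%rsp, %rax	/* save %rsp; each call also grows the stack */
	rsb_loop:
		call	2f		/* stuff RSB entry; push 8 bytes on the stack */
	1:
		pause			/* speculation trap, reached only if a */
		call	1b		/* stuffed entry is consumed by a predictor */
	2:
		call	2f		/* second stuffed entry per iteration */
	1:
		pause
		call	1b
	2:
		subl	$1, %edi
		jnz	rsb_loop
		movq	%rax, %rsp	/* drop all 32 stale return addresses at once */

Restoring %rsp from %rax discards the accumulated return addresses in
one move, which is why the routine only needs to save and restore the
two registers it touches (%rdi and %rax).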
158 161 #elif defined(__i386)
159 162
160 163 /*
161 164 * While the kernel is 64-bit only, dboot is still 32-bit, so there are a
162 165 * limited number of variants that are used for 32-bit. However as dboot is
163 166 * short lived and uses them sparingly, we only do the full variant and do not
164 167 * have an AMD specific version.
165 168 */
166 169
167 170 #define RETPOLINE_MKTHUNK(reg) \
168 171 ENTRY(__x86_indirect_thunk_##reg) \
169 172 call 2f; \
170 173 1: \
171 174 pause; \
172 175 lfence; \
173 176 jmp 1b; \
174 177 2: \
175 178 movl %##reg, (%esp); \
176 179 ret; \
177 180 SET_SIZE(__x86_indirect_thunk_##reg)
178 181
179 182 RETPOLINE_MKTHUNK(edi)
180 183 RETPOLINE_MKTHUNK(eax)
181 184
182 185 #else
183 186 #error "Your architecture is in another castle."
184 187 #endif
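
For context, the __x86_indirect_thunk_<reg> names are the symbols the
compiler emits for indirect branches when retpolines are enabled (e.g.
gcc's -mindirect-branch=thunk-extern). A minimal sketch of the
transformation, assuming a C function pointer fp held in %rax:

	/*
	 * C source:		void (*fp)(void);  fp();
	 * without retpolines:	call *fp(%rip)
	 * with thunk-extern:	roughly the following
	 */
		movq	fp(%rip), %rax
		call	__x86_indirect_thunk_rax	/* one of the ENTRY points above */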