Print this page
16413 Post-barrier Return Stack Buffer (consider no-eIBRS cases)

*** 9,18 ****
--- 9,19 ----
   * http://www.illumos.org/license/CDDL.
   */
  
  /*
   * Copyright 2019 Joyent, Inc.
+  * Copyright 2024 MNX Cloud, Inc.
   */
  
  	.file	"retpoline.s"
  
  /*
*** 117,138 ****
  	RETPOLINE_MKJUMP(r13)
  	RETPOLINE_MKJUMP(r14)
  	RETPOLINE_MKJUMP(r15)
  
  /*
!  * The x86_rsb_stuff function is called from pretty arbitrary
!  * contexts. It's much easier for us to save and restore all the
!  * registers we touch rather than clobber them for callers. You must
!  * preserve this property or the system will panic at best.
   */
! 	ENTRY(x86_rsb_stuff)
! 	/*
! 	 * These nops are present so we can patch a ret instruction if we need
! 	 * to disable RSB stuffing because enhanced IBRS is present or we're
! 	 * disabling mitigations.
! 	 */
  	nop
  	nop
  	pushq	%rdi
  	pushq	%rax
  	movl	$16, %edi
  	movq	%rsp, %rax
--- 118,140 ----
  	RETPOLINE_MKJUMP(r13)
  	RETPOLINE_MKJUMP(r14)
  	RETPOLINE_MKJUMP(r15)
  
  /*
!  * The x86_rsb_stuff{,_vmexit} functions can be called from pretty
!  * arbitrary contexts. It's much easier for us to save and restore all
!  * the registers we touch rather than clobber them for callers. You
!  * must preserve this property or the system will panic at best.
!  *
!  * The two entry points are because the need to RSB stuff on Intel
!  * depends greatly on factors that are different in the VMEXIT case,
!  * vs. the other switching cases. See cpuid.c's cpuid_patch_rsb()
!  * for details.
   */
! 	ENTRY(x86_rsb_stuff_vmexit)
  	nop
+ 	ALTENTRY(x86_rsb_stuff)
  	nop
  	pushq	%rdi
  	pushq	%rax
  	movl	$16, %edi
  	movq	%rsp, %rax
*** 152,161 ****
--- 154,164 ----
  	movq	%rax, %rsp
  	popq	%rax
  	popq	%rdi
  	ret
  	SET_SIZE(x86_rsb_stuff)
+ 	SET_SIZE(x86_rsb_stuff_vmexit)
  
  #elif defined(__i386)
  
  /*
   * While the kernel is 64-bit only, dboot is still 32-bit, so there are a