Print this page
16413 Post-barrier Return Stack Buffer (consider no-eIBRS cases)
16413 Post-barrier Return Stack Buffer (PBRSB) fixes can be detected in HW

@@ -1673,11 +1673,12 @@
         "avx512_bitalg",
         "avx512_vbmi2",
         "avx512_bf16",
         "auto_ibrs",
         "rfds_no",
-        "rfds_clear"
+        "rfds_clear",
+        "pbrsb_no"
 };
 
 boolean_t
 is_x86_feature(void *featureset, uint_t feature)
 {

@@ -2966,35 +2967,67 @@
         }
         membar_producer();
 }
 
 /*
- * We default to enabling RSB mitigations.
+ * We default to enabling Return Stack Buffer (RSB) mitigations.
  *
- * NOTE: We used to skip RSB mitigations with eIBRS, but developments around
- * post-barrier RSB guessing suggests we should enable RSB mitigations always
- * unless specifically instructed not to.
+ * We used to skip RSB mitigations with Intel eIBRS, but developments around
+ * post-barrier RSB (PBRSB) guessing suggests we should enable Intel RSB
+ * mitigations always unless explicitly bypassed, or unless hardware indicates
+ * the bug has been fixed. Intel also says that machines without eIBRS do not
+ * have VMEXIT problems with PBRSB. Basically, if we're Intel and have eIBRS,
+ * we must stuff the RSB in both context switches AND in VMEXIT, unless the
+ * hardware says the PBRSB bug is fixed.  If we're Intel but without eIBRS
+ * (i.e. using retpolines), we must stuff the RSB in context switches, but we
+ * do not have to for VMEXIT.
  *
+ * See (pardon broken URL)  https://www.intel.com/content/www/us/en/developer \
+ * /articles/technical/software-security-guidance/advisory-guidance
+ * /post-barrier-return-stack-buffer-predictions.html
+ *
  * AMD indicates that when Automatic IBRS is enabled we do not need to implement
  * return stack buffer clearing for VMEXIT as it takes care of it. The manual
  * also states that as long as SMEP and we maintain at least one page between
  * the kernel and user space (we have much more of a red zone), then we do not
- * need to clear the RSB. We constrain this to only when Automatic IRBS is
+ * need to clear the RSB. We constrain this to only when Automatic IBRS is
  * present.
  */
 static void
-cpuid_patch_rsb(x86_spectrev2_mitigation_t mit)
+cpuid_patch_rsb(x86_spectrev2_mitigation_t mit, bool intel_pbrsb_no)
 {
         const uint8_t ret = RET_INSTR;
         uint8_t *stuff = (uint8_t *)x86_rsb_stuff;
+        uint8_t *vmx_stuff = (uint8_t *)x86_rsb_stuff_vmexit;
 
         switch (mit) {
         case X86_SPECTREV2_AUTO_IBRS:
         case X86_SPECTREV2_DISABLED:
+                /* Don't bother with any RSB stuffing! */
                 *stuff = ret;
+                *vmx_stuff = ret;
                 break;
+        case X86_SPECTREV2_RETPOLINE:
+                /*
+                 * The Intel document on Post-Barrier RSB says that processors
+                 * without eIBRS do not have PBRSB problems upon VMEXIT.
+                 */
+                VERIFY(!intel_pbrsb_no);
+                VERIFY3U(*stuff, !=, ret);
+                *vmx_stuff = ret;
+                break;
         default:
+                /*
+                 * eIBRS is all that's left.  If CPU claims PBRSB is fixed,
+                 * don't use the RSB mitigation for either context switches
+                 * or VMEXIT.
+                 */
+                if (intel_pbrsb_no) {
+                        /* CPU claims PBRSB problems are fixed. */
+                        *stuff = ret;
+                        *vmx_stuff = ret;
+                }
+                VERIFY3U(*stuff, ==, *vmx_stuff);
                 break;
         }
 }
 
 static void

@@ -3265,11 +3298,15 @@
                                 }
                                 if (reg & IA32_ARCH_CAP_RFDS_CLEAR) {
                                         add_x86_feature(featureset,
                                             X86FSET_RFDS_CLEAR);
                                 }
+                                if (reg & IA32_ARCH_CAP_PBRSB_NO) {
+                                        add_x86_feature(featureset,
+                                            X86FSET_PBRSB_NO);
+                                }
                        }
                         no_trap();
                 }
 #endif  /* !__xpv */
 
                 if (ecp->cp_edx & CPUID_INTC_EDX_7_0_SSBD)

@@ -3325,14 +3362,21 @@
         } else {
                 v2mit = X86_SPECTREV2_RETPOLINE;
         }
 
         cpuid_patch_retpolines(v2mit);
-        cpuid_patch_rsb(v2mit);
         x86_spectrev2_mitigation = v2mit;
         membar_producer();
 
+        /*
+         * Return-stack buffer clearing may need a software-sequence. Discover
+         * and patch as appropriate, after setting the SPECTREv2 global
+         * mitigation level.
+         */
+        cpuid_patch_rsb(v2mit, is_x86_feature(featureset, X86FSET_PBRSB_NO));
+        membar_producer();
+
         /*
          * We need to determine what changes are required for mitigating L1TF
          * and MDS. If the CPU suffers from either of them, then SMT exclusion
          * is required.
          *