13275 bhyve needs richer INIT/SIPI support
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Approved by: Gordon Ross <gordon.w.ross@gmail.com>

          --- old/usr/src/uts/i86pc/io/vmm/amd/svm.c
          +++ new/usr/src/uts/i86pc/io/vmm/amd/svm.c
(1909 lines elided)
1910 1910          load_dr3(gctx->host_dr3);
1911 1911          load_dr6(gctx->host_dr6);
1912 1912          wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
1913 1913          load_dr7(gctx->host_dr7);
1914 1914  }
1915 1915  
1916 1916  /*
1917 1917   * Start vcpu with specified RIP.
1918 1918   */
1919 1919  static int
1920      -svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap,
1921      -    struct vm_eventinfo *evinfo)
     1920 +svm_vmrun(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
1922 1921  {
1923 1922          struct svm_regctx *gctx;
1924 1923          struct svm_softc *svm_sc;
1925 1924          struct svm_vcpu *vcpustate;
1926 1925          struct vmcb_state *state;
1927 1926          struct vmcb_ctrl *ctrl;
1928 1927          struct vm_exit *vmexit;
1929 1928          struct vlapic *vlapic;
1930 1929          struct vm *vm;
1931 1930          uint64_t vmcb_pa;
(71 lines elided)
2003 2002                   */
2004 2003                  disable_gintr();
2005 2004  
2006 2005                  /*
2007 2006                   * Synchronizing and injecting vlapic state is lock-free and is
2008 2007                   * safe (and prudent) to perform with interrupts disabled.
2009 2008                   */
2010 2009                  inject_state = svm_inject_vlapic(svm_sc, vcpu, vlapic,
2011 2010                      inject_state);
2012 2011  
2013      -                if (vcpu_suspended(evinfo)) {
     2012 +                /*
     2013 +                 * Check for vCPU bail-out conditions.  This must be done after
     2014 +                 * svm_inject_events() to detect a triple-fault condition.
     2015 +                 */
     2016 +                if (vcpu_entry_bailout_checks(vm, vcpu, state->rip)) {
2014 2017                          enable_gintr();
2015      -                        vm_exit_suspended(vm, vcpu, state->rip);
2016 2018                          break;
2017 2019                  }
2018 2020  
2019      -                if (vcpu_runblocked(evinfo)) {
     2021 +                if (vcpu_run_state_pending(vm, vcpu)) {
2020 2022                          enable_gintr();
2021      -                        vm_exit_runblock(vm, vcpu, state->rip);
     2023 +                        vm_exit_run_state(vm, vcpu, state->rip);
2022 2024                          break;
2023 2025                  }
2024 2026  
2025      -                if (vcpu_reqidle(evinfo)) {
2026      -                        enable_gintr();
2027      -                        vm_exit_reqidle(vm, vcpu, state->rip);
2028      -                        break;
2029      -                }
2030      -
2031      -                /* We are asked to give the cpu by scheduler. */
2032      -                if (vcpu_should_yield(vm, vcpu)) {
2033      -                        enable_gintr();
2034      -                        vm_exit_astpending(vm, vcpu, state->rip);
2035      -                        break;
2036      -                }
2037      -
2038      -                if (vcpu_debugged(vm, vcpu)) {
2039      -                        enable_gintr();
2040      -                        vm_exit_debug(vm, vcpu, state->rip);
2041      -                        break;
2042      -                }
2043      -
2044 2027                  /*
2045 2028                   * If subsequent activity queued events which require injection
2046 2029                   * handling, take another lap to handle them.
2047 2030                   */
2048 2031                  if (svm_inject_recheck(svm_sc, vcpu, inject_state)) {
2049 2032                          enable_gintr();
2050 2033                          handled = 1;
2051 2034                          continue;
2052 2035                  }
2053 2036  
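
Note: the hunk above replaces the old loop's per-condition exit checks (suspend, run-block, request-idle, scheduler yield, and debug) with a single call to vcpu_entry_bailout_checks(), plus vcpu_run_state_pending()/vm_exit_run_state() for the INIT/SIPI-driven run-state transitions. The helper's body lives in vmm.c and is not part of this file, so the sketch below is only a guess at its shape: the vm_exit_*() helpers are the ones the old inline checks called, but the suspend/idle predicate signatures shown here are placeholders, not the actual implementation.

        /*
         * Hypothetical sketch only: one consolidated predicate that records
         * the appropriate exit reason before the caller re-enables global
         * interrupts and breaks out of the VM-entry loop.  The per-condition
         * predicate signatures are assumed for illustration.
         */
        static bool
        vcpu_entry_bailout_checks(struct vm *vm, int vcpu, uint64_t rip)
        {
                if (vcpu_suspended(vm, vcpu)) {         /* placeholder signature */
                        vm_exit_suspended(vm, vcpu, rip);
                        return (true);
                }
                if (vcpu_reqidle(vm, vcpu)) {           /* placeholder signature */
                        vm_exit_reqidle(vm, vcpu, rip);
                        return (true);
                }
                if (vcpu_should_yield(vm, vcpu)) {
                        vm_exit_astpending(vm, vcpu, rip);
                        return (true);
                }
                if (vcpu_debugged(vm, vcpu)) {
                        vm_exit_debug(vm, vcpu, rip);
                        return (true);
                }
                return (false);
        }
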
(242 lines elided)
2296 2279          /*
2297 2280           * XXX deal with CR3 and invalidate TLB entries tagged with the
2298 2281           * vcpu's ASID. This needs to be treated differently depending on
2299 2282           * whether 'running' is true/false.
2300 2283           */
2301 2284  
2302 2285          return (0);
2303 2286  }
2304 2287  
2305 2288  static int
2306      -svm_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
     2289 +svm_setdesc(void *arg, int vcpu, int reg, const struct seg_desc *desc)
2307 2290  {
2308 2291          struct vmcb *vmcb;
2309 2292          struct svm_softc *sc;
2310 2293          struct vmcb_segment *seg;
2311 2294  
2312 2295          sc = arg;
2313 2296          vmcb = svm_get_vmcb(sc, vcpu);
2314 2297  
2315 2298          switch (reg) {
2316 2299          case VM_REG_GUEST_CS:
(218 lines elided)
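
Note: the svm_setdesc() change above only const-qualifies the incoming descriptor; the function copies base, limit, and access rights out of the caller's seg_desc into the VMCB segment and never writes back through the pointer. A purely illustrative caller might look like the sketch below (invoking the backend entry point directly rather than the usual vm_set_seg_desc() path, with an example access-rights value for a flat 32-bit code segment):

        /*
         * Illustrative only: build a descriptor and hand it to the now
         * const-correct setdesc entry point.  The access-rights encoding
         * (0xc09b: present, DPL 0, code, read/execute, 4 KiB granularity,
         * 32-bit default size) is just an example value.
         */
        const struct seg_desc cs_desc = {
                .base = 0,
                .limit = 0xffffffff,
                .access = 0xc09b,
        };
        int error = svm_setdesc(svm_sc, vcpu, VM_REG_GUEST_CS, &cs_desc);
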