13275 bhyve needs richer INIT/SIPI support
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Approved by: Gordon Ross <gordon.w.ross@gmail.com>
--- old/usr/src/uts/i86pc/io/vmm/intel/vmx.c
+++ new/usr/src/uts/i86pc/io/vmm/intel/vmx.c
1 1 /*-
2 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 3 *
4 4 * Copyright (c) 2011 NetApp, Inc.
5 5 * All rights reserved.
6 6 * Copyright (c) 2018 Joyent, Inc.
7 7 *
8 8 * Redistribution and use in source and binary forms, with or without
9 9 * modification, are permitted provided that the following conditions
10 10 * are met:
11 11 * 1. Redistributions of source code must retain the above copyright
12 12 * notice, this list of conditions and the following disclaimer.
13 13 * 2. Redistributions in binary form must reproduce the above copyright
14 14 * notice, this list of conditions and the following disclaimer in the
15 15 * documentation and/or other materials provided with the distribution.
16 16 *
17 17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
18 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
21 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 27 * SUCH DAMAGE.
28 28 *
29 29 * $FreeBSD$
30 30 */
31 31 /*
32 32 * This file and its contents are supplied under the terms of the
33 33 * Common Development and Distribution License ("CDDL"), version 1.0.
34 34 * You may only use this file in accordance with the terms of version
35 35 * 1.0 of the CDDL.
36 36 *
37 37 * A full copy of the text of the CDDL should have accompanied this
38 38 * source. A copy of the CDDL is also available via the Internet at
39 39 * http://www.illumos.org/license/CDDL.
40 40 *
41 41 * Copyright 2015 Pluribus Networks Inc.
42 42 * Copyright 2018 Joyent, Inc.
43 43 * Copyright 2020 Oxide Computer Company
44 44 */
45 45
46 46 #include <sys/cdefs.h>
47 47 __FBSDID("$FreeBSD$");
48 48
49 49 #include <sys/param.h>
50 50 #include <sys/systm.h>
51 51 #include <sys/smp.h>
52 52 #include <sys/kernel.h>
53 53 #include <sys/malloc.h>
54 54 #include <sys/pcpu.h>
55 55 #include <sys/proc.h>
56 56 #include <sys/sysctl.h>
57 57
58 58 #ifndef __FreeBSD__
59 59 #include <sys/x86_archext.h>
60 60 #include <sys/smp_impldefs.h>
61 61 #include <sys/smt.h>
62 62 #include <sys/hma.h>
63 63 #include <sys/trap.h>
64 64 #endif
65 65
66 66 #include <vm/vm.h>
67 67 #include <vm/pmap.h>
68 68
69 69 #include <machine/psl.h>
70 70 #include <machine/cpufunc.h>
71 71 #include <machine/md_var.h>
72 72 #include <machine/reg.h>
73 73 #include <machine/segments.h>
74 74 #include <machine/smp.h>
75 75 #include <machine/specialreg.h>
76 76 #include <machine/vmparam.h>
77 77
78 78 #include <machine/vmm.h>
79 79 #include <machine/vmm_dev.h>
80 80 #include <sys/vmm_instruction_emul.h>
81 81 #include "vmm_lapic.h"
82 82 #include "vmm_host.h"
83 83 #include "vmm_ioport.h"
84 84 #include "vmm_ktr.h"
85 85 #include "vmm_stat.h"
86 86 #include "vatpic.h"
87 87 #include "vlapic.h"
88 88 #include "vlapic_priv.h"
89 89
90 90 #include "ept.h"
91 91 #include "vmcs.h"
92 92 #include "vmx.h"
93 93 #include "vmx_msr.h"
94 94 #include "x86.h"
95 95 #include "vmx_controls.h"
96 96
97 97 #define PINBASED_CTLS_ONE_SETTING \
98 98 (PINBASED_EXTINT_EXITING | \
99 99 PINBASED_NMI_EXITING | \
100 100 PINBASED_VIRTUAL_NMI)
101 101 #define PINBASED_CTLS_ZERO_SETTING 0
102 102
103 103 #define PROCBASED_CTLS_WINDOW_SETTING \
104 104 (PROCBASED_INT_WINDOW_EXITING | \
105 105 PROCBASED_NMI_WINDOW_EXITING)
106 106
107 107 #ifdef __FreeBSD__
108 108 #define PROCBASED_CTLS_ONE_SETTING \
109 109 (PROCBASED_SECONDARY_CONTROLS | \
110 110 PROCBASED_MWAIT_EXITING | \
111 111 PROCBASED_MONITOR_EXITING | \
112 112 PROCBASED_IO_EXITING | \
113 113 PROCBASED_MSR_BITMAPS | \
114 114 PROCBASED_CTLS_WINDOW_SETTING | \
115 115 PROCBASED_CR8_LOAD_EXITING | \
116 116 PROCBASED_CR8_STORE_EXITING)
117 117 #else
118 118 /* We consider TSC offset a necessity for unsynched TSC handling */
119 119 #define PROCBASED_CTLS_ONE_SETTING \
120 120 (PROCBASED_SECONDARY_CONTROLS | \
121 121 PROCBASED_TSC_OFFSET | \
122 122 PROCBASED_MWAIT_EXITING | \
123 123 PROCBASED_MONITOR_EXITING | \
124 124 PROCBASED_IO_EXITING | \
125 125 PROCBASED_MSR_BITMAPS | \
126 126 PROCBASED_CTLS_WINDOW_SETTING | \
127 127 PROCBASED_CR8_LOAD_EXITING | \
128 128 PROCBASED_CR8_STORE_EXITING)
129 129 #endif /* __FreeBSD__ */
130 130
131 131 #define PROCBASED_CTLS_ZERO_SETTING \
132 132 (PROCBASED_CR3_LOAD_EXITING | \
133 133 PROCBASED_CR3_STORE_EXITING | \
134 134 PROCBASED_IO_BITMAPS)
135 135
136 136 /*
137 137 * EPT and Unrestricted Guest are considered necessities. The latter is not a
138 138 * requirement on FreeBSD, where grub2-bhyve is used to load guests directly
139 139 * without a bootrom starting in real mode.
140 140 */
141 141 #define PROCBASED_CTLS2_ONE_SETTING \
142 142 (PROCBASED2_ENABLE_EPT | \
143 143 PROCBASED2_UNRESTRICTED_GUEST)
144 144 #define PROCBASED_CTLS2_ZERO_SETTING 0
145 145
146 146 #define VM_EXIT_CTLS_ONE_SETTING \
147 147 (VM_EXIT_SAVE_DEBUG_CONTROLS | \
148 148 VM_EXIT_HOST_LMA | \
149 149 VM_EXIT_LOAD_PAT | \
150 150 VM_EXIT_SAVE_EFER | \
151 151 VM_EXIT_LOAD_EFER | \
152 152 VM_EXIT_ACKNOWLEDGE_INTERRUPT)
153 153
154 154 #define VM_EXIT_CTLS_ZERO_SETTING 0
155 155
156 156 #define VM_ENTRY_CTLS_ONE_SETTING \
157 157 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \
158 158 VM_ENTRY_LOAD_EFER)
159 159
160 160 #define VM_ENTRY_CTLS_ZERO_SETTING \
161 161 (VM_ENTRY_INTO_SMM | \
162 162 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
163 163
164 164 #define HANDLED 1
165 165 #define UNHANDLED 0
166 166
167 167 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
168 168 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
169 169
170 170 SYSCTL_DECL(_hw_vmm);
171 171 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
172 172 NULL);
173 173
174 174 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
175 175 static uint32_t exit_ctls, entry_ctls;
176 176
177 177 static uint64_t cr0_ones_mask, cr0_zeros_mask;
178 178
179 179 static uint64_t cr4_ones_mask, cr4_zeros_mask;
180 180
181 181 static int vmx_initialized;
182 182
183 183 /* Do not flush RSB upon vmexit */
184 184 static int no_flush_rsb;
185 185
186 186 /*
187 187 * Optional capabilities
188 188 */
189 189 #ifdef __FreeBSD__
190 190 SYSCTL_DECL(_hw_vmm_vmx);
191 191 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap,
192 192 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
193 193 NULL);
194 194 #endif
195 195
196 196 /* HLT triggers a VM-exit */
197 197 static int cap_halt_exit;
198 198
199 199 /* PAUSE triggers a VM-exit */
200 200 static int cap_pause_exit;
201 201
202 202 /* Monitor trap flag */
203 203 static int cap_monitor_trap;
204 204
205 205 /* Guests are allowed to use INVPCID */
206 206 static int cap_invpcid;
207 207
208 208 /* Extra capabilities (VMX_CAP_*) beyond the minimum */
209 209 static enum vmx_caps vmx_capabilities;
210 210
211 211 /* APICv posted interrupt vector */
212 212 static int pirvec = -1;
213 213
214 214 #ifdef __FreeBSD__
215 215 static struct unrhdr *vpid_unr;
216 216 #endif /* __FreeBSD__ */
217 217 static uint_t vpid_alloc_failed;
218 218
219 219 int guest_l1d_flush;
220 220 int guest_l1d_flush_sw;
221 221
222 222 /* MSR save region is composed of an array of 'struct msr_entry' */
223 223 struct msr_entry {
224 224 uint32_t index;
225 225 uint32_t reserved;
226 226 uint64_t val;
227 227 };
228 228
229 229 static struct msr_entry msr_load_list[1] __aligned(16);
230 230
231 231 /*
232 232 * The definitions of SDT probes for VMX.
233 233 */
234 234
235 235 /* BEGIN CSTYLED */
236 236 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
237 237 "struct vmx *", "int", "struct vm_exit *");
238 238
239 239 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
240 240 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");
241 241
242 242 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
243 243 "struct vmx *", "int", "struct vm_exit *", "uint64_t");
244 244
245 245 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
246 246 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
247 247
248 248 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
249 249 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");
250 250
251 251 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
252 252 "struct vmx *", "int", "struct vm_exit *");
253 253
254 254 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
255 255 "struct vmx *", "int", "struct vm_exit *");
256 256
257 257 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
258 258 "struct vmx *", "int", "struct vm_exit *");
259 259
260 260 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
261 261 "struct vmx *", "int", "struct vm_exit *");
262 262
263 263 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
264 264 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
265 265
266 266 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
267 267 "struct vmx *", "int", "struct vm_exit *");
268 268
269 269 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
270 270 "struct vmx *", "int", "struct vm_exit *");
271 271
272 272 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
273 273 "struct vmx *", "int", "struct vm_exit *");
274 274
275 275 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
276 276 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");
277 277
278 278 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
279 279 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");
280 280
281 281 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
282 282 "struct vmx *", "int", "struct vm_exit *", "uint64_t");
283 283
284 284 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
285 285 "struct vmx *", "int", "struct vm_exit *");
286 286
287 287 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
288 288 "struct vmx *", "int", "struct vm_exit *");
289 289
290 290 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
291 291 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
292 292
293 293 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
294 294 "struct vmx *", "int", "struct vm_exit *");
295 295
296 296 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
297 297 "struct vmx *", "int", "struct vm_exit *");
298 298
299 299 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
300 300 "struct vmx *", "int", "struct vm_exit *");
301 301
302 302 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
303 303 "struct vmx *", "int", "struct vm_exit *");
304 304
305 305 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
306 306 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
307 307
308 308 SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
309 309 "struct vmx *", "int", "struct vm_exit *", "int");
310 310 /* END CSTYLED */
311 311
312 312 /*
313 313 * Use the last page below 4GB as the APIC access address. This address is
314 314 * occupied by the boot firmware so it is guaranteed that it will not conflict
315 315 * with a page in system memory.
316 316 */
317 317 #define APIC_ACCESS_ADDRESS 0xFFFFF000
318 318
319 319 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
320 320 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
321 321 static void vmx_apply_tsc_adjust(struct vmx *, int);
322 322 static void vmx_apicv_sync_tmr(struct vlapic *vlapic);
323 323 static void vmx_tpr_shadow_enter(struct vlapic *vlapic);
324 324 static void vmx_tpr_shadow_exit(struct vlapic *vlapic);
325 325
326 326 static int
327 327 vmx_allow_x2apic_msrs(struct vmx *vmx)
328 328 {
329 329 int i, error;
330 330
331 331 error = 0;
332 332
333 333 /*
334 334 * Allow readonly access to the following x2APIC MSRs from the guest.
335 335 */
336 336 error += guest_msr_ro(vmx, MSR_APIC_ID);
337 337 error += guest_msr_ro(vmx, MSR_APIC_VERSION);
338 338 error += guest_msr_ro(vmx, MSR_APIC_LDR);
339 339 error += guest_msr_ro(vmx, MSR_APIC_SVR);
340 340
341 341 for (i = 0; i < 8; i++)
342 342 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
343 343
344 344 for (i = 0; i < 8; i++)
345 345 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
346 346
347 347 for (i = 0; i < 8; i++)
348 348 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
349 349
350 350 error += guest_msr_ro(vmx, MSR_APIC_ESR);
351 351 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
352 352 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
353 353 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
354 354 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
355 355 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
356 356 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
357 357 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
358 358 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
359 359 error += guest_msr_ro(vmx, MSR_APIC_ICR);
360 360
361 361 /*
362 362 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
363 363 *
364 364 * These registers get special treatment described in the section
365 365 * "Virtualizing MSR-Based APIC Accesses".
366 366 */
367 367 error += guest_msr_rw(vmx, MSR_APIC_TPR);
368 368 error += guest_msr_rw(vmx, MSR_APIC_EOI);
369 369 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
370 370
371 371 return (error);
372 372 }
373 373
374 374 static ulong_t
375 375 vmx_fix_cr0(ulong_t cr0)
376 376 {
377 377 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
378 378 }
379 379
380 380 static ulong_t
381 381 vmx_fix_cr4(ulong_t cr4)
382 382 {
383 383 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
384 384 }
385 385
386 386 static void
387 387 vpid_free(int vpid)
388 388 {
389 389 if (vpid < 0 || vpid > 0xffff)
390 390 panic("vpid_free: invalid vpid %d", vpid);
391 391
392 392 /*
393 393 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
394 394 * the unit number allocator.
395 395 */
396 396
397 397 if (vpid > VM_MAXCPU)
398 398 #ifdef __FreeBSD__
399 399 free_unr(vpid_unr, vpid);
400 400 #else
401 401 hma_vmx_vpid_free((uint16_t)vpid);
402 402 #endif
403 403 }
404 404
405 405 static void
406 406 vpid_alloc(uint16_t *vpid, int num)
407 407 {
408 408 int i, x;
409 409
410 410 if (num <= 0 || num > VM_MAXCPU)
411 411 panic("invalid number of vpids requested: %d", num);
412 412
413 413 /*
414 414 * If the "enable vpid" execution control is not enabled then the
415 415 * VPID is required to be 0 for all vcpus.
416 416 */
417 417 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
418 418 for (i = 0; i < num; i++)
419 419 vpid[i] = 0;
420 420 return;
421 421 }
422 422
423 423 /*
424 424 * Allocate a unique VPID for each vcpu from the unit number allocator.
425 425 */
426 426 for (i = 0; i < num; i++) {
427 427 #ifdef __FreeBSD__
428 428 x = alloc_unr(vpid_unr);
429 429 #else
430 430 uint16_t tmp;
431 431
432 432 tmp = hma_vmx_vpid_alloc();
433 433 x = (tmp == 0) ? -1 : tmp;
434 434 #endif
435 435 if (x == -1)
436 436 break;
437 437 else
438 438 vpid[i] = x;
439 439 }
440 440
441 441 if (i < num) {
442 442 atomic_add_int(&vpid_alloc_failed, 1);
443 443
444 444 /*
445 445 * If the unit number allocator does not have enough unique
446 446 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
447 447 *
448 448		 * These VPIDs are not unique across VMs, but this does not
449 449 * affect correctness because the combined mappings are also
450 450 * tagged with the EP4TA which is unique for each VM.
451 451 *
452 452 * It is still sub-optimal because the invvpid will invalidate
453 453 * combined mappings for a particular VPID across all EP4TAs.
454 454 */
455 455 while (i-- > 0)
456 456 vpid_free(vpid[i]);
457 457
458 458 for (i = 0; i < num; i++)
459 459 vpid[i] = i + 1;
460 460 }
461 461 }
462 462
463 463 static int
464 464 vmx_cleanup(void)
465 465 {
466 466 /* This is taken care of by the hma registration */
467 467 return (0);
468 468 }
469 469
470 470 static void
471 471 vmx_restore(void)
472 472 {
473 473 /* No-op on illumos */
474 474 }
475 475
476 476 static int
477 477 vmx_init(int ipinum)
478 478 {
479 479 int error;
480 480 uint64_t fixed0, fixed1;
481 481 uint32_t tmp;
482 482 enum vmx_caps avail_caps = VMX_CAP_NONE;
483 483
484 484 /* Check support for primary processor-based VM-execution controls */
485 485 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
486 486 MSR_VMX_TRUE_PROCBASED_CTLS,
487 487 PROCBASED_CTLS_ONE_SETTING,
488 488 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
489 489 if (error) {
490 490 printf("vmx_init: processor does not support desired primary "
491 491 "processor-based controls\n");
492 492 return (error);
493 493 }
494 494
495 495 /* Clear the processor-based ctl bits that are set on demand */
496 496 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
497 497
498 498 /* Check support for secondary processor-based VM-execution controls */
499 499 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
500 500 MSR_VMX_PROCBASED_CTLS2,
501 501 PROCBASED_CTLS2_ONE_SETTING,
502 502 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
503 503 if (error) {
504 504 printf("vmx_init: processor does not support desired secondary "
505 505 "processor-based controls\n");
506 506 return (error);
507 507 }
508 508
509 509 /* Check support for VPID */
510 510 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
511 511 MSR_VMX_PROCBASED_CTLS2,
512 512 PROCBASED2_ENABLE_VPID,
513 513 0, &tmp);
514 514 if (error == 0)
515 515 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
516 516
517 517 /* Check support for pin-based VM-execution controls */
518 518 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
519 519 MSR_VMX_TRUE_PINBASED_CTLS,
520 520 PINBASED_CTLS_ONE_SETTING,
521 521 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
522 522 if (error) {
523 523 printf("vmx_init: processor does not support desired "
524 524 "pin-based controls\n");
525 525 return (error);
526 526 }
527 527
528 528 /* Check support for VM-exit controls */
529 529 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
530 530 VM_EXIT_CTLS_ONE_SETTING,
531 531 VM_EXIT_CTLS_ZERO_SETTING,
532 532 &exit_ctls);
533 533 if (error) {
534 534 printf("vmx_init: processor does not support desired "
535 535 "exit controls\n");
536 536 return (error);
537 537 }
538 538
539 539 /* Check support for VM-entry controls */
540 540 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
541 541 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
542 542 &entry_ctls);
543 543 if (error) {
544 544 printf("vmx_init: processor does not support desired "
545 545 "entry controls\n");
546 546 return (error);
547 547 }
548 548
549 549 /*
550 550 * Check support for optional features by testing them
551 551 * as individual bits
552 552 */
553 553 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
554 554 MSR_VMX_TRUE_PROCBASED_CTLS,
555 555 PROCBASED_HLT_EXITING, 0,
556 556 &tmp) == 0);
557 557
558 558 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
559 559 MSR_VMX_PROCBASED_CTLS,
560 560 PROCBASED_MTF, 0,
561 561 &tmp) == 0);
562 562
563 563 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
564 564 MSR_VMX_TRUE_PROCBASED_CTLS,
565 565 PROCBASED_PAUSE_EXITING, 0,
566 566 &tmp) == 0);
567 567
568 568 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
569 569 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
570 570 &tmp) == 0);
571 571
572 572 /*
573 573 * Check for APIC virtualization capabilities:
574 574 * - TPR shadowing
575 575 * - Full APICv (with or without x2APIC support)
576 576 * - Posted interrupt handling
577 577 */
578 578 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS,
579 579 PROCBASED_USE_TPR_SHADOW, 0, &tmp) == 0) {
580 580 avail_caps |= VMX_CAP_TPR_SHADOW;
581 581
582 582 const uint32_t apicv_bits =
583 583 PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
584 584 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
585 585 PROCBASED2_VIRTUALIZE_X2APIC_MODE |
586 586 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY;
587 587 if (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
588 588 MSR_VMX_PROCBASED_CTLS2, apicv_bits, 0, &tmp) == 0) {
589 589 avail_caps |= VMX_CAP_APICV;
590 590
591 591 /*
592 592 * It may make sense in the future to differentiate
593 593 * hardware (or software) configurations with APICv but
594 594 * no support for accelerating x2APIC mode.
595 595 */
596 596 avail_caps |= VMX_CAP_APICV_X2APIC;
597 597
598 598 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
599 599 MSR_VMX_TRUE_PINBASED_CTLS,
600 600 PINBASED_POSTED_INTERRUPT, 0, &tmp);
601 601 if (error == 0) {
602 602 /*
603 603 * If the PSM-provided interfaces for requesting
604 604 * and using a PIR IPI vector are present, use
605 605 * them for posted interrupts.
606 606 */
607 607 if (psm_get_pir_ipivect != NULL &&
608 608 psm_send_pir_ipi != NULL) {
609 609 pirvec = psm_get_pir_ipivect();
610 610 avail_caps |= VMX_CAP_APICV_PIR;
611 611 }
612 612 }
613 613 }
614 614 }
615 615
616 616 /* Initialize EPT */
617 617 error = ept_init(ipinum);
618 618 if (error) {
619 619 printf("vmx_init: ept initialization failed (%d)\n", error);
620 620 return (error);
621 621 }
622 622
623 623 #ifdef __FreeBSD__
624 624 guest_l1d_flush = (cpu_ia32_arch_caps &
625 625 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
626 626 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
627 627
628 628 /*
629 629 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when
630 630 * available. Otherwise fall back to the software flush
631 631 * method which loads enough data from the kernel text to
632 632 * flush existing L1D content, both on VMX entry and on NMI
633 633 * return.
634 634 */
635 635 if (guest_l1d_flush) {
636 636 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
637 637 guest_l1d_flush_sw = 1;
638 638 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
639 639 &guest_l1d_flush_sw);
640 640 }
641 641 if (guest_l1d_flush_sw) {
642 642 if (nmi_flush_l1d_sw <= 1)
643 643 nmi_flush_l1d_sw = 1;
644 644 } else {
645 645 msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
646 646 msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
647 647 }
648 648 }
649 649 #else
650 650 /* L1D flushing is taken care of by smt_acquire() and friends */
651 651 guest_l1d_flush = 0;
652 652 #endif /* __FreeBSD__ */
653 653
654 654 /*
655 655 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
656 656 */
657 657 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
658 658 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
659 659 cr0_ones_mask = fixed0 & fixed1;
660 660 cr0_zeros_mask = ~fixed0 & ~fixed1;
661 661
662 662 /*
663 663 * Since Unrestricted Guest was already verified present, CR0_PE and
664 664 * CR0_PG are allowed to be set to zero in VMX non-root operation
665 665 */
666 666 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
667 667
668 668 /*
669 669 * Do not allow the guest to set CR0_NW or CR0_CD.
670 670 */
671 671 cr0_zeros_mask |= (CR0_NW | CR0_CD);
672 672
673 673 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
674 674 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
675 675 cr4_ones_mask = fixed0 & fixed1;
676 676 cr4_zeros_mask = ~fixed0 & ~fixed1;
677 677
678 678 vmx_msr_init();
679 679
680 680 vmx_capabilities = avail_caps;
681 681 vmx_initialized = 1;
682 682
683 683 return (0);
684 684 }
685 685
686 686 static void
687 687 vmx_trigger_hostintr(int vector)
688 688 {
689 689 #ifdef __FreeBSD__
690 690 uintptr_t func;
691 691 struct gate_descriptor *gd;
692 692
693 693 gd = &idt[vector];
694 694
695 695 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
696 696 "invalid vector %d", vector));
697 697 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
698 698 vector));
699 699 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
700 700 "has invalid type %d", vector, gd->gd_type));
701 701 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
702 702 "has invalid dpl %d", vector, gd->gd_dpl));
703 703 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
704 704 "for vector %d has invalid selector %d", vector, gd->gd_selector));
705 705 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
706 706 "IST %d", vector, gd->gd_ist));
707 707
708 708 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
709 709 vmx_call_isr(func);
710 710 #else
711 711 VERIFY(vector >= 32 && vector <= 255);
712 712 vmx_call_isr(vector - 32);
713 713 #endif /* __FreeBSD__ */
714 714 }
715 715
716 716 static void *
717 717 vmx_vminit(struct vm *vm, pmap_t pmap)
718 718 {
719 719 uint16_t vpid[VM_MAXCPU];
720 720 int i, error, datasel;
721 721 struct vmx *vmx;
722 722 uint32_t exc_bitmap;
723 723 uint16_t maxcpus;
724 724 uint32_t proc_ctls, proc2_ctls, pin_ctls;
725 725
726 726 vmx = malloc(sizeof (struct vmx), M_VMX, M_WAITOK | M_ZERO);
727 727 if ((uintptr_t)vmx & PAGE_MASK) {
728 728 panic("malloc of struct vmx not aligned on %d byte boundary",
729 729 PAGE_SIZE);
730 730 }
731 731 vmx->vm = vm;
732 732
733 733 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
734 734
735 735 /*
736 736 * Clean up EPTP-tagged guest physical and combined mappings
737 737 *
738 738 * VMX transitions are not required to invalidate any guest physical
739 739 * mappings. So, it may be possible for stale guest physical mappings
740 740 * to be present in the processor TLBs.
741 741 *
742 742 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
743 743 */
744 744 ept_invalidate_mappings(vmx->eptp);
745 745
746 746 msr_bitmap_initialize(vmx->msr_bitmap);
747 747
748 748 /*
749 749 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
750 750 * The guest FSBASE and GSBASE are saved and restored during
751 751 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
752 752 * always restored from the vmcs host state area on vm-exit.
753 753 *
754 754 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
755 755 * how they are saved/restored so can be directly accessed by the
756 756 * guest.
757 757 *
758 758 * MSR_EFER is saved and restored in the guest VMCS area on a
759 759 * VM exit and entry respectively. It is also restored from the
760 760 * host VMCS area on a VM exit.
761 761 *
762 762 * The TSC MSR is exposed read-only. Writes are disallowed as
763 763		 * that will impact the host TSC. If the guest does a write,
764 764 * the "use TSC offsetting" execution control is enabled and the
765 765 * difference between the host TSC and the guest TSC is written
766 766 * into the TSC offset in the VMCS.
767 767 */
768 768 if (guest_msr_rw(vmx, MSR_GSBASE) ||
769 769 guest_msr_rw(vmx, MSR_FSBASE) ||
770 770 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
771 771 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
772 772 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
773 773 guest_msr_rw(vmx, MSR_EFER) ||
774 774 guest_msr_ro(vmx, MSR_TSC))
775 775 panic("vmx_vminit: error setting guest msr access");
776 776
777 777 vpid_alloc(vpid, VM_MAXCPU);
778 778
779 779 /* Grab the established defaults */
780 780 proc_ctls = procbased_ctls;
781 781 proc2_ctls = procbased_ctls2;
782 782 pin_ctls = pinbased_ctls;
783 783 /* For now, default to the available capabilities */
784 784 vmx->vmx_caps = vmx_capabilities;
785 785
786 786 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
787 787 proc_ctls |= PROCBASED_USE_TPR_SHADOW;
788 788 proc_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
789 789 proc_ctls &= ~PROCBASED_CR8_STORE_EXITING;
790 790 }
791 791 if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
792 792 ASSERT(vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW));
793 793
794 794 proc2_ctls |= (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
795 795 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
796 796 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
797 797
798 798 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
799 799 APIC_ACCESS_ADDRESS);
800 800 /* XXX this should really return an error to the caller */
801 801 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
802 802 }
803 803 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
804 804 ASSERT(vmx_cap_en(vmx, VMX_CAP_APICV));
805 805
806 806 pin_ctls |= PINBASED_POSTED_INTERRUPT;
807 807 }
808 808
809 809 maxcpus = vm_get_maxcpus(vm);
810 810 datasel = vmm_get_host_datasel();
811 811 for (i = 0; i < maxcpus; i++) {
812 812 /*
813 813 * Cache physical address lookups for various components which
814 814 * may be required inside the critical_enter() section implied
815 815 * by VMPTRLD() below.
816 816 */
817 817 vm_paddr_t msr_bitmap_pa = vtophys(vmx->msr_bitmap);
818 818 vm_paddr_t apic_page_pa = vtophys(&vmx->apic_page[i]);
819 819 vm_paddr_t pir_desc_pa = vtophys(&vmx->pir_desc[i]);
820 820
821 821 vmx->vmcs_pa[i] = (uintptr_t)vtophys(&vmx->vmcs[i]);
822 822 vmcs_initialize(&vmx->vmcs[i], vmx->vmcs_pa[i]);
823 823
824 824 vmx_msr_guest_init(vmx, i);
825 825
826 826 vmcs_load(vmx->vmcs_pa[i]);
827 827
828 828 vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
829 829 vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());
830 830
831 831 /* Load the control registers */
832 832 vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
833 833 vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4() | CR4_VMXE);
834 834
835 835 /* Load the segment selectors */
836 836 vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());
837 837
838 838 vmcs_write(VMCS_HOST_ES_SELECTOR, datasel);
839 839 vmcs_write(VMCS_HOST_SS_SELECTOR, datasel);
840 840 vmcs_write(VMCS_HOST_DS_SELECTOR, datasel);
841 841
842 842 vmcs_write(VMCS_HOST_FS_SELECTOR, vmm_get_host_fssel());
843 843 vmcs_write(VMCS_HOST_GS_SELECTOR, vmm_get_host_gssel());
844 844 vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());
845 845
846 846 /*
847 847 * Configure host sysenter MSRs to be restored on VM exit.
848 848 * The thread-specific MSR_INTC_SEP_ESP value is loaded in
849 849 * vmx_run.
850 850 */
851 851 vmcs_write(VMCS_HOST_IA32_SYSENTER_CS, KCS_SEL);
852 852 vmcs_write(VMCS_HOST_IA32_SYSENTER_EIP,
853 853 rdmsr(MSR_SYSENTER_EIP_MSR));
854 854
855 855 /* instruction pointer */
856 856 if (no_flush_rsb) {
857 857 vmcs_write(VMCS_HOST_RIP, (uint64_t)vmx_exit_guest);
858 858 } else {
859 859 vmcs_write(VMCS_HOST_RIP,
860 860 (uint64_t)vmx_exit_guest_flush_rsb);
861 861 }
862 862
863 863 /* link pointer */
864 864 vmcs_write(VMCS_LINK_POINTER, ~0);
865 865
866 866 vmcs_write(VMCS_EPTP, vmx->eptp);
867 867 vmcs_write(VMCS_PIN_BASED_CTLS, pin_ctls);
868 868 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
869 869 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc2_ctls);
870 870 vmcs_write(VMCS_EXIT_CTLS, exit_ctls);
871 871 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
872 872 vmcs_write(VMCS_MSR_BITMAP, msr_bitmap_pa);
873 873 vmcs_write(VMCS_VPID, vpid[i]);
874 874
875 875 if (guest_l1d_flush && !guest_l1d_flush_sw) {
876 876 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
877 877 (vm_offset_t)&msr_load_list[0]));
878 878 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
879 879 nitems(msr_load_list));
880 880 vmcs_write(VMCS_EXIT_MSR_STORE, 0);
881 881 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
882 882 }
883 883
884 884 /* exception bitmap */
885 885 if (vcpu_trace_exceptions(vm, i))
886 886 exc_bitmap = 0xffffffff;
887 887 else
888 888 exc_bitmap = 1 << IDT_MC;
889 889 vmcs_write(VMCS_EXCEPTION_BITMAP, exc_bitmap);
890 890
891 891 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
892 892 vmcs_write(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
893 893
894 894 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
895 895 vmcs_write(VMCS_VIRTUAL_APIC, apic_page_pa);
896 896 }
897 897
898 898 if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
899 899 vmcs_write(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
900 900 vmcs_write(VMCS_EOI_EXIT0, 0);
901 901 vmcs_write(VMCS_EOI_EXIT1, 0);
902 902 vmcs_write(VMCS_EOI_EXIT2, 0);
903 903 vmcs_write(VMCS_EOI_EXIT3, 0);
904 904 }
905 905 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
906 906 vmcs_write(VMCS_PIR_VECTOR, pirvec);
907 907 vmcs_write(VMCS_PIR_DESC, pir_desc_pa);
908 908 }
909 909
910 910 /*
911 911 * Set up the CR0/4 masks and configure the read shadow state
912 912 * to the power-on register value from the Intel Sys Arch.
913 913 * CR0 - 0x60000010
914 914 * CR4 - 0
915 915 */
916 916 vmcs_write(VMCS_CR0_MASK, cr0_ones_mask | cr0_zeros_mask);
917 917 vmcs_write(VMCS_CR0_SHADOW, 0x60000010);
918 918 vmcs_write(VMCS_CR4_MASK, cr4_ones_mask | cr4_zeros_mask);
919 919 vmcs_write(VMCS_CR4_SHADOW, 0);
920 920
921 921 vmcs_clear(vmx->vmcs_pa[i]);
922 922
923 923 vmx->cap[i].set = 0;
924 924 vmx->cap[i].proc_ctls = proc_ctls;
925 925 vmx->cap[i].proc_ctls2 = proc2_ctls;
926 926 vmx->cap[i].exc_bitmap = exc_bitmap;
927 927
928 928 vmx->state[i].nextrip = ~0;
929 929 vmx->state[i].lastcpu = NOCPU;
930 930 vmx->state[i].vpid = vpid[i];
931 931
932 932
933 933 vmx->ctx[i].pmap = pmap;
934 934 }
935 935
936 936 return (vmx);
937 937 }
938 938
939 939 static int
940 940 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
941 941 {
942 942 #ifdef __FreeBSD__
943 943 int handled, func;
944 944
945 945 func = vmxctx->guest_rax;
946 946 #else
947 947 int handled;
948 948 #endif
949 949
950 950 handled = x86_emulate_cpuid(vm, vcpu, (uint64_t *)&vmxctx->guest_rax,
951 951 (uint64_t *)&vmxctx->guest_rbx, (uint64_t *)&vmxctx->guest_rcx,
952 952 (uint64_t *)&vmxctx->guest_rdx);
953 953 return (handled);
954 954 }
955 955
956 956 static __inline void
957 957 vmx_run_trace(struct vmx *vmx, int vcpu)
958 958 {
959 959 #ifdef KTR
960 960 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %lx", vmcs_guest_rip());
961 961 #endif
962 962 }
963 963
964 964 static __inline void
965 965 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
966 966 {
967 967 #ifdef KTR
968 968 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
969 969 #endif
970 970 }
971 971
972 972 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
973 973 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
974 974
975 975 #define INVVPID_TYPE_ADDRESS 0UL
976 976 #define INVVPID_TYPE_SINGLE_CONTEXT 1UL
977 977 #define INVVPID_TYPE_ALL_CONTEXTS 2UL
978 978
979 979 struct invvpid_desc {
980 980 uint16_t vpid;
981 981 uint16_t _res1;
982 982 uint32_t _res2;
983 983 uint64_t linear_addr;
984 984 };
985 985 CTASSERT(sizeof (struct invvpid_desc) == 16);
986 986
987 987 static __inline void
988 988 invvpid(uint64_t type, struct invvpid_desc desc)
989 989 {
990 990 int error;
991 991
992 992 __asm __volatile("invvpid %[desc], %[type];"
993 993 VMX_SET_ERROR_CODE_ASM
994 994 : [error] "=r" (error)
995 995 : [desc] "m" (desc), [type] "r" (type)
996 996 : "memory");
997 997
998 998 if (error)
999 999 panic("invvpid error %d", error);
1000 1000 }
1001 1001
1002 1002 /*
1003 1003 * Invalidate guest mappings identified by its vpid from the TLB.
1004 1004 */
1005 1005 static __inline void
1006 1006 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1007 1007 {
1008 1008 struct vmxstate *vmxstate;
1009 1009 struct invvpid_desc invvpid_desc;
1010 1010
1011 1011 vmxstate = &vmx->state[vcpu];
1012 1012 if (vmxstate->vpid == 0)
1013 1013 return;
1014 1014
1015 1015 if (!running) {
1016 1016 /*
1017 1017 * Set the 'lastcpu' to an invalid host cpu.
1018 1018 *
1019 1019 * This will invalidate TLB entries tagged with the vcpu's
1020 1020 * vpid the next time it runs via vmx_set_pcpu_defaults().
1021 1021 */
1022 1022 vmxstate->lastcpu = NOCPU;
1023 1023 return;
1024 1024 }
1025 1025
1026 1026 #ifdef __FreeBSD__
1027 1027 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1028 1028 "critical section", __func__, vcpu));
1029 1029 #endif
1030 1030
1031 1031 /*
1032 1032 * Invalidate all mappings tagged with 'vpid'
1033 1033 *
1034 1034 * We do this because this vcpu was executing on a different host
1035 1035 * cpu when it last ran. We do not track whether it invalidated
1036 1036 * mappings associated with its 'vpid' during that run. So we must
1037 1037 * assume that the mappings associated with 'vpid' on 'curcpu' are
1038 1038 * stale and invalidate them.
1039 1039 *
1040 1040 * Note that we incur this penalty only when the scheduler chooses to
1041 1041 * move the thread associated with this vcpu between host cpus.
1042 1042 *
1043 1043 * Note also that this will invalidate mappings tagged with 'vpid'
1044 1044 * for "all" EP4TAs.
1045 1045 */
1046 1046 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1047 1047 invvpid_desc._res1 = 0;
1048 1048 invvpid_desc._res2 = 0;
1049 1049 invvpid_desc.vpid = vmxstate->vpid;
1050 1050 invvpid_desc.linear_addr = 0;
1051 1051 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1052 1052 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1053 1053 } else {
1054 1054 /*
1055 1055 * The invvpid can be skipped if an invept is going to
1056 1056 * be performed before entering the guest. The invept
1057 1057 * will invalidate combined mappings tagged with
1058 1058 * 'vmx->eptp' for all vpids.
1059 1059 */
1060 1060 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1061 1061 }
1062 1062 }
1063 1063
1064 1064 static void
1065 1065 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1066 1066 {
1067 1067 struct vmxstate *vmxstate;
1068 1068
1069 1069 /*
1070 1070 * Regardless of whether the VM appears to have migrated between CPUs,
1071 1071 * save the host sysenter stack pointer. As it points to the kernel
1072 1072 * stack of each thread, the correct value must be maintained for every
1073 1073 * trip into the critical section.
1074 1074 */
1075 1075 vmcs_write(VMCS_HOST_IA32_SYSENTER_ESP, rdmsr(MSR_SYSENTER_ESP_MSR));
1076 1076
1077 1077 /*
1078 1078 * Perform any needed TSC_OFFSET adjustment based on TSC_MSR writes or
1079 1079 * migration between host CPUs with differing TSC values.
1080 1080 */
1081 1081 vmx_apply_tsc_adjust(vmx, vcpu);
1082 1082
1083 1083 vmxstate = &vmx->state[vcpu];
1084 1084 if (vmxstate->lastcpu == curcpu)
1085 1085 return;
1086 1086
1087 1087 vmxstate->lastcpu = curcpu;
1088 1088
1089 1089 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1090 1090
1091 1091 /* Load the per-CPU IDT address */
1092 1092 vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
1093 1093 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1094 1094 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1095 1095 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1096 1096 vmx_invvpid(vmx, vcpu, pmap, 1);
1097 1097 }
1098 1098
1099 1099 /*
1100 1100 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1101 1101 */
1102 1102 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1103 1103
1104 1104 static __inline void
1105 1105 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1106 1106 {
1107 1107
1108 1108 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1109 1109 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1110 1110 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1111 1111 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1112 1112 }
1113 1113 }
1114 1114
1115 1115 static __inline void
1116 1116 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1117 1117 {
1118 1118
1119 1119 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1120 1120 ("intr_window_exiting not set: %x", vmx->cap[vcpu].proc_ctls));
1121 1121 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1122 1122 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1123 1123 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1124 1124 }
1125 1125
1126 1126 static __inline bool
1127 1127 vmx_nmi_window_exiting(struct vmx *vmx, int vcpu)
1128 1128 {
1129 1129 return ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0);
1130 1130 }
1131 1131
1132 1132 static __inline void
1133 1133 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1134 1134 {
1135 1135 if (!vmx_nmi_window_exiting(vmx, vcpu)) {
1136 1136 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1137 1137 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1138 1138 }
1139 1139 }
1140 1140
1141 1141 static __inline void
1142 1142 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1143 1143 {
1144 1144 ASSERT(vmx_nmi_window_exiting(vmx, vcpu));
1145 1145 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1146 1146 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1147 1147 }
1148 1148
1149 1149 /*
1150 1150 * Set the TSC adjustment, taking into account the offsets measured between
1151 1151 * host physical CPUs. This is required even if the guest has not set a TSC
1152 1152	 * offset, since a vCPU inherits the TSC offset of whatever physical CPU it has
1153 1153 * migrated onto. Without this mitigation, un-synched host TSCs will convey
1154 1154 * the appearance of TSC time-travel to the guest as its vCPUs migrate.
1155 1155 */
1156 1156 static void
1157 1157 vmx_apply_tsc_adjust(struct vmx *vmx, int vcpu)
1158 1158 {
1159 1159 extern hrtime_t tsc_gethrtime_tick_delta(void);
1160 1160 const uint64_t target_offset = (vcpu_tsc_offset(vmx->vm, vcpu) +
1161 1161 (uint64_t)tsc_gethrtime_tick_delta());
1162 1162
1163 1163 ASSERT(vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET);
1164 1164
1165 1165 if (vmx->tsc_offset_active[vcpu] != target_offset) {
1166 1166 vmcs_write(VMCS_TSC_OFFSET, target_offset);
1167 1167 vmx->tsc_offset_active[vcpu] = target_offset;
1168 1168 }
1169 1169 }
1170 1170
1171 1171 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
1172 1172 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1173 1173 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
1174 1174 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1175 1175
1176 1176 static void
1177 1177 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1178 1178 {
1179 1179 ASSERT0(vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & NMI_BLOCKING);
1180 1180 ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);
1181 1181
1182 1182 /*
1183 1183 * Inject the virtual NMI. The vector must be the NMI IDT entry
1184 1184 * or the VMCS entry check will fail.
1185 1185 */
1186 1186 vmcs_write(VMCS_ENTRY_INTR_INFO,
1187 1187 IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID);
1188 1188
1189 1189 /* Clear the request */
1190 1190 vm_nmi_clear(vmx->vm, vcpu);
1191 1191 }
1192 1192
1193 1193 /*
1194 1194 * Inject exceptions, NMIs, and ExtINTs.
1195 1195 *
1196 1196 * The logic behind these are complicated and may involve mutex contention, so
1197 1197 * the injection is performed without the protection of host CPU interrupts
1198 1198 * being disabled. This means a racing notification could be "lost",
1199 1199 * necessitating a later call to vmx_inject_recheck() to close that window
1200 1200 * of opportunity.
1201 1201 */
1202 1202 static enum event_inject_state
1203 1203 vmx_inject_events(struct vmx *vmx, int vcpu, uint64_t rip)
1204 1204 {
1205 1205 uint64_t entryinfo;
1206 1206 uint32_t gi, info;
1207 1207 int vector;
1208 1208 enum event_inject_state state;
1209 1209
1210 1210 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1211 1211 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1212 1212 state = EIS_CAN_INJECT;
1213 1213
1214 1214 /* Clear any interrupt blocking if the guest %rip has changed */
1215 1215 if (vmx->state[vcpu].nextrip != rip && (gi & HWINTR_BLOCKING) != 0) {
1216 1216 gi &= ~HWINTR_BLOCKING;
1217 1217 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1218 1218 }
1219 1219
1220 1220 /*
1221 1221 * It could be that an interrupt is already pending for injection from
1222 1222 * the VMCS. This would be the case if the vCPU exited for conditions
1223 1223 * such as an AST before a vm-entry delivered the injection.
1224 1224 */
1225 1225 if ((info & VMCS_INTR_VALID) != 0) {
1226 1226 return (EIS_EV_EXISTING | EIS_REQ_EXIT);
1227 1227 }
1228 1228
1229 1229 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1230 1230 ASSERT(entryinfo & VMCS_INTR_VALID);
1231 1231
1232 1232 info = entryinfo;
1233 1233 vector = info & 0xff;
1234 1234 if (vector == IDT_BP || vector == IDT_OF) {
1235 1235 /*
1236 1236 * VT-x requires #BP and #OF to be injected as software
1237 1237 * exceptions.
1238 1238 */
1239 1239 info &= ~VMCS_INTR_T_MASK;
1240 1240 info |= VMCS_INTR_T_SWEXCEPTION;
1241 1241 }
1242 1242
1243 1243 if (info & VMCS_INTR_DEL_ERRCODE) {
1244 1244 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1245 1245 }
1246 1246
1247 1247 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1248 1248 state = EIS_EV_INJECTED;
1249 1249 }
1250 1250
1251 1251 if (vm_nmi_pending(vmx->vm, vcpu)) {
1252 1252 /*
1253 1253 * If there are no conditions blocking NMI injection then inject
1254 1254 * it directly here otherwise enable "NMI window exiting" to
1255 1255 * inject it as soon as we can.
1256 1256 *
1257 1257 * According to the Intel manual, some CPUs do not allow NMI
1258 1258 * injection when STI_BLOCKING is active. That check is
1259 1259 * enforced here, regardless of CPU capability. If running on a
1260 1260 * CPU without such a restriction it will immediately exit and
1261 1261 * the NMI will be injected in the "NMI window exiting" handler.
1262 1262 */
1263 1263 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1264 1264 if (state == EIS_CAN_INJECT) {
1265 1265 vmx_inject_nmi(vmx, vcpu);
1266 1266 state = EIS_EV_INJECTED;
1267 1267 } else {
1268 1268 return (state | EIS_REQ_EXIT);
1269 1269 }
1270 1270 } else {
1271 1271 vmx_set_nmi_window_exiting(vmx, vcpu);
1272 1272 }
1273 1273 }
1274 1274
1275 1275 if (vm_extint_pending(vmx->vm, vcpu)) {
1276 1276 if (state != EIS_CAN_INJECT) {
1277 1277 return (state | EIS_REQ_EXIT);
1278 1278 }
1279 1279 if ((gi & HWINTR_BLOCKING) != 0 ||
1280 1280 (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
1281 1281 return (EIS_GI_BLOCK);
1282 1282 }
1283 1283
1284 1284 /* Ask the legacy pic for a vector to inject */
1285 1285 vatpic_pending_intr(vmx->vm, &vector);
1286 1286
1287 1287 /*
1288 1288 * From the Intel SDM, Volume 3, Section "Maskable
1289 1289 * Hardware Interrupts":
1290 1290 * - maskable interrupt vectors [0,255] can be delivered
1291 1291 * through the INTR pin.
1292 1292 */
1293 1293 KASSERT(vector >= 0 && vector <= 255,
1294 1294 ("invalid vector %d from INTR", vector));
1295 1295
1296 1296 /* Inject the interrupt */
1297 1297 vmcs_write(VMCS_ENTRY_INTR_INFO,
1298 1298 VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);
1299 1299
1300 1300 vm_extint_clear(vmx->vm, vcpu);
1301 1301 vatpic_intr_accepted(vmx->vm, vector);
1302 1302 state = EIS_EV_INJECTED;
1303 1303 }
1304 1304
1305 1305 return (state);
1306 1306 }
1307 1307
1308 1308 /*
1309 1309 * Inject any interrupts pending on the vLAPIC.
1310 1310 *
1311 1311 * This is done with host CPU interrupts disabled so notification IPIs, either
1312 1312 * from the standard vCPU notification or APICv posted interrupts, will be
1313 1313 * queued on the host APIC and recognized when entering VMX context.
1314 1314 */
1315 1315 static enum event_inject_state
1316 1316 vmx_inject_vlapic(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1317 1317 {
1318 1318 int vector;
1319 1319
1320 1320 if (!vlapic_pending_intr(vlapic, &vector)) {
1321 1321 return (EIS_CAN_INJECT);
1322 1322 }
1323 1323
1324 1324 /*
1325 1325 * From the Intel SDM, Volume 3, Section "Maskable
1326 1326 * Hardware Interrupts":
1327 1327 * - maskable interrupt vectors [16,255] can be delivered
1328 1328 * through the local APIC.
1329 1329 */
1330 1330 KASSERT(vector >= 16 && vector <= 255,
1331 1331 ("invalid vector %d from local APIC", vector));
1332 1332
1333 1333 if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
1334 1334 uint16_t status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
1335 1335 uint16_t status_new = (status_old & 0xff00) | vector;
1336 1336
1337 1337 /*
1338 1338 * The APICv state will have been synced into the vLAPIC
1339 1339 * as part of vlapic_pending_intr(). Prepare the VMCS
1340 1340 * for the to-be-injected pending interrupt.
1341 1341 */
1342 1342 if (status_new > status_old) {
1343 1343 vmcs_write(VMCS_GUEST_INTR_STATUS, status_new);
1344 1344 VCPU_CTR2(vlapic->vm, vlapic->vcpuid,
1345 1345 "vmx_inject_interrupts: guest_intr_status "
1346 1346 "changed from 0x%04x to 0x%04x",
1347 1347 status_old, status_new);
1348 1348 }
1349 1349
1350 1350 /*
1351 1351 * Ensure VMCS state regarding EOI traps is kept in sync
1352 1352 * with the TMRs in the vlapic.
1353 1353 */
1354 1354 vmx_apicv_sync_tmr(vlapic);
1355 1355
1356 1356 /*
1357 1357 * The rest of the injection process for injecting the
1358 1358 * interrupt(s) is handled by APICv. It does not preclude other
1359 1359 * event injection from occurring.
1360 1360 */
1361 1361 return (EIS_CAN_INJECT);
1362 1362 }
1363 1363
1364 1364 ASSERT0(vmcs_read(VMCS_ENTRY_INTR_INFO) & VMCS_INTR_VALID);
1365 1365
1366 1366 /* Does guest interruptability block injection? */
1367 1367 if ((vmcs_read(VMCS_GUEST_INTERRUPTIBILITY) & HWINTR_BLOCKING) != 0 ||
1368 1368 (vmcs_read(VMCS_GUEST_RFLAGS) & PSL_I) == 0) {
1369 1369 return (EIS_GI_BLOCK);
1370 1370 }
1371 1371
1372 1372 /* Inject the interrupt */
1373 1373 vmcs_write(VMCS_ENTRY_INTR_INFO,
1374 1374 VMCS_INTR_T_HWINTR | VMCS_INTR_VALID | vector);
1375 1375
1376 1376 /* Update the Local APIC ISR */
1377 1377 vlapic_intr_accepted(vlapic, vector);
1378 1378
1379 1379 return (EIS_EV_INJECTED);
1380 1380 }
1381 1381
1382 1382 /*
1383 1383 * Re-check for events to be injected.
1384 1384 *
1385 1385 * Once host CPU interrupts are disabled, check for the presence of any events
1386 1386 * which require injection processing. If an exit is required upon injection,
1387 1387 * or once the guest becomes interruptable, that will be configured too.
1388 1388 */
1389 1389 static bool
1390 1390 vmx_inject_recheck(struct vmx *vmx, int vcpu, enum event_inject_state state)
1391 1391 {
1392 1392 if (state == EIS_CAN_INJECT) {
1393 1393 if (vm_nmi_pending(vmx->vm, vcpu) &&
1394 1394 !vmx_nmi_window_exiting(vmx, vcpu)) {
1395 1395 /* queued NMI not blocked by NMI-window-exiting */
1396 1396 return (true);
1397 1397 }
1398 1398 if (vm_extint_pending(vmx->vm, vcpu)) {
1399 1399 /* queued ExtINT not blocked by existing injection */
1400 1400 return (true);
1401 1401 }
1402 1402 } else {
1403 1403 if ((state & EIS_REQ_EXIT) != 0) {
1404 1404 /*
1405 1405 * Use a self-IPI to force an immediate exit after
1406 1406 * event injection has occurred.
1407 1407 */
1408 1408 poke_cpu(CPU->cpu_id);
1409 1409 } else {
1410 1410 /*
1411 1411 * If any event is being injected, an exit immediately
1412 1412 * upon becoming interruptable again will allow pending
1413 1413 * or newly queued events to be injected in a timely
1414 1414 * manner.
1415 1415 */
1416 1416 vmx_set_int_window_exiting(vmx, vcpu);
1417 1417 }
1418 1418 }
1419 1419 return (false);
1420 1420 }
1421 1421
1422 1422 /*
1423 1423 * If the Virtual NMIs execution control is '1' then the logical processor
1424 1424 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1425 1425 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1426 1426 * virtual-NMI blocking.
1427 1427 *
1428 1428 * This unblocking occurs even if the IRET causes a fault. In this case the
1429 1429 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1430 1430 */
1431 1431 static void
1432 1432 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1433 1433 {
1434 1434 uint32_t gi;
1435 1435
1436 1436 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1437 1437 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1438 1438 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1439 1439 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1440 1440 }
1441 1441
1442 1442 static void
1443 1443 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1444 1444 {
1445 1445 uint32_t gi;
1446 1446
1447 1447 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1448 1448 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1449 1449 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1450 1450 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1451 1451 }
1452 1452
1453 1453 static void
1454 1454 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1455 1455 {
1456 1456 uint32_t gi;
1457 1457
1458 1458 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1459 1459 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1460 1460 ("NMI blocking is not in effect %x", gi));
1461 1461 }
1462 1462
1463 1463 static int
1464 1464 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1465 1465 {
1466 1466 struct vmxctx *vmxctx;
1467 1467 uint64_t xcrval;
1468 1468 const struct xsave_limits *limits;
1469 1469
1470 1470 vmxctx = &vmx->ctx[vcpu];
1471 1471 limits = vmm_get_xsave_limits();
1472 1472
1473 1473 /*
1474 1474 * Note that the processor raises a GP# fault on its own if
1475 1475 * xsetbv is executed for CPL != 0, so we do not have to
1476 1476 * emulate that fault here.
1477 1477 */
1478 1478
1479 1479 /* Only xcr0 is supported. */
1480 1480 if (vmxctx->guest_rcx != 0) {
1481 1481 vm_inject_gp(vmx->vm, vcpu);
1482 1482 return (HANDLED);
1483 1483 }
1484 1484
1485 1485 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1486 1486 if (!limits->xsave_enabled ||
1487 1487 !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1488 1488 vm_inject_ud(vmx->vm, vcpu);
1489 1489 return (HANDLED);
1490 1490 }
1491 1491
1492 1492 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1493 1493 if ((xcrval & ~limits->xcr0_allowed) != 0) {
1494 1494 vm_inject_gp(vmx->vm, vcpu);
1495 1495 return (HANDLED);
1496 1496 }
1497 1497
1498 1498 if (!(xcrval & XFEATURE_ENABLED_X87)) {
1499 1499 vm_inject_gp(vmx->vm, vcpu);
1500 1500 return (HANDLED);
1501 1501 }
1502 1502
1503 1503 /* AVX (YMM_Hi128) requires SSE. */
1504 1504 if (xcrval & XFEATURE_ENABLED_AVX &&
1505 1505 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1506 1506 vm_inject_gp(vmx->vm, vcpu);
1507 1507 return (HANDLED);
1508 1508 }
1509 1509
1510 1510 /*
1511 1511 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1512 1512 * ZMM_Hi256, and Hi16_ZMM.
1513 1513 */
1514 1514 if (xcrval & XFEATURE_AVX512 &&
1515 1515 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1516 1516 (XFEATURE_AVX512 | XFEATURE_AVX)) {
1517 1517 vm_inject_gp(vmx->vm, vcpu);
1518 1518 return (HANDLED);
1519 1519 }
1520 1520
1521 1521 /*
1522 1522 * Intel MPX requires both bound register state flags to be
1523 1523 * set.
1524 1524 */
1525 1525 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1526 1526 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1527 1527 vm_inject_gp(vmx->vm, vcpu);
1528 1528 return (HANDLED);
1529 1529 }
1530 1530
1531 1531 /*
1532 1532 * This runs "inside" vmrun() with the guest's FPU state, so
1533 1533 * modifying xcr0 directly modifies the guest's xcr0, not the
1534 1534 * host's.
1535 1535 */
1536 1536 load_xcr(0, xcrval);
1537 1537 return (HANDLED);
1538 1538 }
1539 1539
1540 1540 static uint64_t
1541 1541 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1542 1542 {
1543 1543 const struct vmxctx *vmxctx;
1544 1544
1545 1545 vmxctx = &vmx->ctx[vcpu];
1546 1546
1547 1547 switch (ident) {
1548 1548 case 0:
1549 1549 return (vmxctx->guest_rax);
1550 1550 case 1:
1551 1551 return (vmxctx->guest_rcx);
1552 1552 case 2:
1553 1553 return (vmxctx->guest_rdx);
1554 1554 case 3:
1555 1555 return (vmxctx->guest_rbx);
1556 1556 case 4:
1557 1557 return (vmcs_read(VMCS_GUEST_RSP));
1558 1558 case 5:
1559 1559 return (vmxctx->guest_rbp);
1560 1560 case 6:
1561 1561 return (vmxctx->guest_rsi);
1562 1562 case 7:
1563 1563 return (vmxctx->guest_rdi);
1564 1564 case 8:
1565 1565 return (vmxctx->guest_r8);
1566 1566 case 9:
1567 1567 return (vmxctx->guest_r9);
1568 1568 case 10:
1569 1569 return (vmxctx->guest_r10);
1570 1570 case 11:
1571 1571 return (vmxctx->guest_r11);
1572 1572 case 12:
1573 1573 return (vmxctx->guest_r12);
1574 1574 case 13:
1575 1575 return (vmxctx->guest_r13);
1576 1576 case 14:
1577 1577 return (vmxctx->guest_r14);
1578 1578 case 15:
1579 1579 return (vmxctx->guest_r15);
1580 1580 default:
1581 1581 panic("invalid vmx register %d", ident);
1582 1582 }
1583 1583 }
1584 1584
1585 1585 static void
1586 1586 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1587 1587 {
1588 1588 struct vmxctx *vmxctx;
1589 1589
1590 1590 vmxctx = &vmx->ctx[vcpu];
1591 1591
1592 1592 switch (ident) {
1593 1593 case 0:
1594 1594 vmxctx->guest_rax = regval;
1595 1595 break;
1596 1596 case 1:
1597 1597 vmxctx->guest_rcx = regval;
1598 1598 break;
1599 1599 case 2:
1600 1600 vmxctx->guest_rdx = regval;
1601 1601 break;
1602 1602 case 3:
1603 1603 vmxctx->guest_rbx = regval;
1604 1604 break;
1605 1605 case 4:
1606 1606 vmcs_write(VMCS_GUEST_RSP, regval);
1607 1607 break;
1608 1608 case 5:
1609 1609 vmxctx->guest_rbp = regval;
1610 1610 break;
1611 1611 case 6:
1612 1612 vmxctx->guest_rsi = regval;
1613 1613 break;
1614 1614 case 7:
1615 1615 vmxctx->guest_rdi = regval;
1616 1616 break;
1617 1617 case 8:
1618 1618 vmxctx->guest_r8 = regval;
1619 1619 break;
1620 1620 case 9:
1621 1621 vmxctx->guest_r9 = regval;
1622 1622 break;
1623 1623 case 10:
1624 1624 vmxctx->guest_r10 = regval;
1625 1625 break;
1626 1626 case 11:
1627 1627 vmxctx->guest_r11 = regval;
1628 1628 break;
1629 1629 case 12:
1630 1630 vmxctx->guest_r12 = regval;
1631 1631 break;
1632 1632 case 13:
1633 1633 vmxctx->guest_r13 = regval;
1634 1634 break;
1635 1635 case 14:
1636 1636 vmxctx->guest_r14 = regval;
1637 1637 break;
1638 1638 case 15:
1639 1639 vmxctx->guest_r15 = regval;
1640 1640 break;
1641 1641 default:
1642 1642 panic("invalid vmx register %d", ident);
1643 1643 }
1644 1644 }
1645 1645
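/*
 * The %cr0/%cr4/%cr8 access handlers below decode the VM-exit
 * qualification for control-register accesses (see the Intel SDM): bits
 * 3:0 hold the control register number, bits 5:4 the access type
 * (0 = MOV to CR, 1 = MOV from CR), and bits 11:8 the general-purpose
 * register operand.  For example, a guest executing 'mov %rax, %cr0'
 * produces a qualification of 0 (CR0, MOV-to-CR, operand %rax), which is
 * dispatched to vmx_emulate_cr0_access() to update both the CR0 shadow
 * and the guest-effective CR0.  Access types other than the MOV forms
 * are left unhandled and end up being processed in userland.
 */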
1646 1646 static int
1647 1647 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1648 1648 {
1649 1649 uint64_t crval, regval;
1650 1650
1651 1651 /* We only handle mov to %cr0 at this time */
1652 1652 if ((exitqual & 0xf0) != 0x00)
1653 1653 return (UNHANDLED);
1654 1654
1655 1655 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1656 1656
1657 1657 vmcs_write(VMCS_CR0_SHADOW, regval);
1658 1658
1659 1659 crval = regval | cr0_ones_mask;
1660 1660 crval &= ~cr0_zeros_mask;
1661 1661 vmcs_write(VMCS_GUEST_CR0, crval);
1662 1662
1663 1663 if (regval & CR0_PG) {
1664 1664 uint64_t efer, entry_ctls;
1665 1665
1666 1666 /*
1667 1667 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1668 1668 * the "IA-32e mode guest" bit in VM-entry control must be
1669 1669 * equal.
1670 1670 */
1671 1671 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1672 1672 if (efer & EFER_LME) {
1673 1673 efer |= EFER_LMA;
1674 1674 vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1675 1675 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1676 1676 entry_ctls |= VM_ENTRY_GUEST_LMA;
1677 1677 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1678 1678 }
1679 1679 }
1680 1680
1681 1681 return (HANDLED);
1682 1682 }
1683 1683
1684 1684 static int
1685 1685 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1686 1686 {
1687 1687 uint64_t crval, regval;
1688 1688
1689 1689 /* We only handle mov to %cr4 at this time */
1690 1690 if ((exitqual & 0xf0) != 0x00)
1691 1691 return (UNHANDLED);
1692 1692
1693 1693 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1694 1694
1695 1695 vmcs_write(VMCS_CR4_SHADOW, regval);
1696 1696
1697 1697 crval = regval | cr4_ones_mask;
1698 1698 crval &= ~cr4_zeros_mask;
1699 1699 vmcs_write(VMCS_GUEST_CR4, crval);
1700 1700
1701 1701 return (HANDLED);
1702 1702 }
1703 1703
1704 1704 static int
1705 1705 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1706 1706 {
1707 1707 struct vlapic *vlapic;
1708 1708 uint64_t cr8;
1709 1709 int regnum;
1710 1710
1711 1711 /* We only handle mov %cr8 to/from a register at this time. */
1712 1712 if ((exitqual & 0xe0) != 0x00) {
1713 1713 return (UNHANDLED);
1714 1714 }
1715 1715
1716 1716 vlapic = vm_lapic(vmx->vm, vcpu);
1717 1717 regnum = (exitqual >> 8) & 0xf;
1718 1718 if (exitqual & 0x10) {
1719 1719 cr8 = vlapic_get_cr8(vlapic);
1720 1720 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1721 1721 } else {
1722 1722 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1723 1723 vlapic_set_cr8(vlapic, cr8);
1724 1724 }
1725 1725
1726 1726 return (HANDLED);
1727 1727 }
1728 1728
1729 1729 /*
1730 1730 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1731 1731 */
1732 1732 static int
1733 1733 vmx_cpl(void)
1734 1734 {
1735 1735 uint32_t ssar;
1736 1736
1737 1737 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1738 1738 return ((ssar >> 5) & 0x3);
1739 1739 }
1740 1740
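/*
 * Guest execution mode is derived from live VMCS state: EFER.LMA with
 * CS.L set (bit 13 of the CS access rights) means 64-bit mode, EFER.LMA
 * without CS.L means compatibility mode, CR0.PE alone means protected
 * mode, and anything else is real mode.  The paging mode follows
 * similarly from CR0.PG, CR4.PAE, and EFER.LME.
 */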
1741 1741 static enum vm_cpu_mode
1742 1742 vmx_cpu_mode(void)
1743 1743 {
1744 1744 uint32_t csar;
1745 1745
1746 1746 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1747 1747 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1748 1748 if (csar & 0x2000)
1749 1749 return (CPU_MODE_64BIT); /* CS.L = 1 */
1750 1750 else
1751 1751 return (CPU_MODE_COMPATIBILITY);
1752 1752 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1753 1753 return (CPU_MODE_PROTECTED);
1754 1754 } else {
1755 1755 return (CPU_MODE_REAL);
1756 1756 }
1757 1757 }
1758 1758
1759 1759 static enum vm_paging_mode
1760 1760 vmx_paging_mode(void)
1761 1761 {
1762 1762
1763 1763 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1764 1764 return (PAGING_MODE_FLAT);
1765 1765 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1766 1766 return (PAGING_MODE_32);
1767 1767 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1768 1768 return (PAGING_MODE_64);
1769 1769 else
1770 1770 return (PAGING_MODE_PAE);
1771 1771 }
1772 1772
1773 1773 static void
1774 1774 vmx_paging_info(struct vm_guest_paging *paging)
1775 1775 {
1776 1776 paging->cr3 = vmcs_guest_cr3();
1777 1777 paging->cpl = vmx_cpl();
1778 1778 paging->cpu_mode = vmx_cpu_mode();
1779 1779 paging->paging_mode = vmx_paging_mode();
1780 1780 }
1781 1781
1782 1782 static void
1783 1783 vmexit_mmio_emul(struct vm_exit *vmexit, struct vie *vie, uint64_t gpa,
1784 1784 uint64_t gla)
1785 1785 {
1786 1786 struct vm_guest_paging paging;
1787 1787 uint32_t csar;
1788 1788
1789 1789 vmexit->exitcode = VM_EXITCODE_MMIO_EMUL;
1790 1790 vmexit->inst_length = 0;
1791 1791 vmexit->u.mmio_emul.gpa = gpa;
1792 1792 vmexit->u.mmio_emul.gla = gla;
1793 1793 vmx_paging_info(&paging);
1794 1794
1795 1795 switch (paging.cpu_mode) {
1796 1796 case CPU_MODE_REAL:
1797 1797 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1798 1798 vmexit->u.mmio_emul.cs_d = 0;
1799 1799 break;
1800 1800 case CPU_MODE_PROTECTED:
1801 1801 case CPU_MODE_COMPATIBILITY:
1802 1802 vmexit->u.mmio_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1803 1803 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1804 1804 vmexit->u.mmio_emul.cs_d = SEG_DESC_DEF32(csar);
1805 1805 break;
1806 1806 default:
1807 1807 vmexit->u.mmio_emul.cs_base = 0;
1808 1808 vmexit->u.mmio_emul.cs_d = 0;
1809 1809 break;
1810 1810 }
1811 1811
1812 1812 vie_init_mmio(vie, NULL, 0, &paging, gpa);
1813 1813 }
1814 1814
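/*
 * For IN/OUT VM-exits, the exit qualification encodes the access: bits
 * 2:0 hold the access size minus one, bit 3 the direction (1 = in),
 * bit 4 the string-instruction flag, bit 5 the REP prefix, and bits
 * 31:16 the port number.  For example, a 'rep outsb' to port 0x3f8
 * yields a qualification of 0x03f80030 (1-byte, out, string, REP,
 * port 0x3f8).  vmexit_inout() unpacks this into the vm_inout structure
 * consumed by the in/out emulation code.
 */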
1815 1815 static void
1816 1816 vmexit_inout(struct vm_exit *vmexit, struct vie *vie, uint64_t qual,
1817 1817 uint32_t eax)
1818 1818 {
1819 1819 struct vm_guest_paging paging;
1820 1820 struct vm_inout *inout;
1821 1821
1822 1822 inout = &vmexit->u.inout;
1823 1823
1824 1824 inout->bytes = (qual & 0x7) + 1;
1825 1825 inout->flags = 0;
1826 1826 inout->flags |= (qual & 0x8) ? INOUT_IN : 0;
1827 1827 inout->flags |= (qual & 0x10) ? INOUT_STR : 0;
1828 1828 inout->flags |= (qual & 0x20) ? INOUT_REP : 0;
1829 1829 inout->port = (uint16_t)(qual >> 16);
1830 1830 inout->eax = eax;
1831 1831 if (inout->flags & INOUT_STR) {
1832 1832 uint64_t inst_info;
1833 1833
1834 1834 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
1835 1835
1836 1836 /*
1837 1837 * According to the SDM, bits 9:7 encode the address size of the
1838 1838 * ins/outs operation, but only values 0/1/2 are expected,
1839 1839 * corresponding to 16/32/64 bit sizes.
1840 1840 */
1841 1841 inout->addrsize = 2 << BITX(inst_info, 9, 7);
1842 1842 VERIFY(inout->addrsize == 2 || inout->addrsize == 4 ||
1843 1843 inout->addrsize == 8);
1844 1844
1845 1845 if (inout->flags & INOUT_IN) {
1846 1846 /*
1847 1847 * The bits describing the segment in INSTRUCTION_INFO
1848 1848 * are not defined for ins, leaving it to system
1849 1849 * software to assume %es (encoded as 0)
1850 1850 */
1851 1851 inout->segment = 0;
1852 1852 } else {
1853 1853 /*
1854 1854 * Bits 15-17 encode the segment for OUTS.
1855 1855 * This value follows the standard x86 segment order.
1856 1856 */
1857 1857 inout->segment = (inst_info >> 15) & 0x7;
1858 1858 }
1859 1859 }
1860 1860
1861 1861 vmexit->exitcode = VM_EXITCODE_INOUT;
1862 1862 vmx_paging_info(&paging);
1863 1863 vie_init_inout(vie, inout, vmexit->inst_length, &paging);
1864 1864
1865 1865 /* The in/out emulation will handle advancing %rip */
1866 1866 vmexit->inst_length = 0;
1867 1867 }
1868 1868
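/*
 * ept_fault_type() maps an EPT violation exit qualification to the
 * VM_PROT_* access type that caused it.  ept_emulation_fault() accepts
 * only plain read/write faults on a guest-physical address reached by
 * translating a guest-linear address, which is the shape of fault that
 * MMIO instruction emulation can handle.
 */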
1869 1869 static int
1870 1870 ept_fault_type(uint64_t ept_qual)
1871 1871 {
1872 1872 int fault_type;
1873 1873
1874 1874 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1875 1875 fault_type = VM_PROT_WRITE;
1876 1876 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1877 1877 fault_type = VM_PROT_EXECUTE;
1878 1878 else
1879 1879 fault_type = VM_PROT_READ;
1880 1880
1881 1881 return (fault_type);
1882 1882 }
1883 1883
1884 1884 static bool
1885 1885 ept_emulation_fault(uint64_t ept_qual)
1886 1886 {
1887 1887 int read, write;
1888 1888
1889 1889 /* EPT fault on an instruction fetch doesn't make sense here */
1890 1890 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1891 1891 return (false);
1892 1892
1893 1893 /* EPT fault must be a read fault or a write fault */
1894 1894 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1895 1895 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1896 1896 if ((read | write) == 0)
1897 1897 return (false);
1898 1898
1899 1899 /*
1900 1900 * The EPT violation must have been caused by accessing a
1901 1901 * guest-physical address that is a translation of a guest-linear
1902 1902 * address.
1903 1903 */
1904 1904 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1905 1905 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1906 1906 return (false);
1907 1907 }
1908 1908
1909 1909 return (true);
1910 1910 }
1911 1911
1912 1912 static __inline int
1913 1913 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1914 1914 {
1915 1915 uint32_t proc_ctls2;
1916 1916
1917 1917 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1918 1918 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1919 1919 }
1920 1920
1921 1921 static __inline int
1922 1922 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1923 1923 {
1924 1924 uint32_t proc_ctls2;
1925 1925
1926 1926 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1927 1927 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1928 1928 }
1929 1929
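/*
 * APIC-write VM-exits are trap-like: the guest's store has already been
 * virtualized into the virtual-APIC page by the time the exit is taken,
 * so the handler below only needs to emulate the side effects of writing
 * the affected register (via the vlapic_*_write_handler routines).
 * Offsets with no side-effect handler are reported as unhandled.
 */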
1930 1930 static int
1931 1931 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1932 1932 uint64_t qual)
1933 1933 {
1934 1934 int handled, offset;
1935 1935 uint32_t *apic_regs, vector;
1936 1936
1937 1937 handled = HANDLED;
1938 1938 offset = APIC_WRITE_OFFSET(qual);
1939 1939
1940 1940 if (!apic_access_virtualization(vmx, vcpuid)) {
1941 1941 /*
1942 1942 * In general there should not be any APIC write VM-exits
1943 1943 * unless APIC-access virtualization is enabled.
1944 1944 *
1945 1945 * However self-IPI virtualization can legitimately trigger
1946 1946 * an APIC-write VM-exit so treat it specially.
1947 1947 */
1948 1948 if (x2apic_virtualization(vmx, vcpuid) &&
1949 1949 offset == APIC_OFFSET_SELF_IPI) {
1950 1950 apic_regs = (uint32_t *)(vlapic->apic_page);
1951 1951 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1952 1952 vlapic_self_ipi_handler(vlapic, vector);
1953 1953 return (HANDLED);
1954 1954 } else
1955 1955 return (UNHANDLED);
1956 1956 }
1957 1957
1958 1958 switch (offset) {
1959 1959 case APIC_OFFSET_ID:
1960 1960 vlapic_id_write_handler(vlapic);
1961 1961 break;
1962 1962 case APIC_OFFSET_LDR:
1963 1963 vlapic_ldr_write_handler(vlapic);
1964 1964 break;
1965 1965 case APIC_OFFSET_DFR:
1966 1966 vlapic_dfr_write_handler(vlapic);
1967 1967 break;
1968 1968 case APIC_OFFSET_SVR:
1969 1969 vlapic_svr_write_handler(vlapic);
1970 1970 break;
1971 1971 case APIC_OFFSET_ESR:
1972 1972 vlapic_esr_write_handler(vlapic);
1973 1973 break;
1974 1974 case APIC_OFFSET_ICR_LOW:
1975 1975 if (vlapic_icrlo_write_handler(vlapic) != 0) {
1976 1976 handled = UNHANDLED;
1977 1977 }
1978 1978 break;
1979 1979 case APIC_OFFSET_CMCI_LVT:
1980 1980 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1981 1981 vlapic_lvt_write_handler(vlapic, offset);
1982 1982 break;
1983 1983 case APIC_OFFSET_TIMER_ICR:
1984 1984 vlapic_icrtmr_write_handler(vlapic);
1985 1985 break;
1986 1986 case APIC_OFFSET_TIMER_DCR:
1987 1987 vlapic_dcr_write_handler(vlapic);
1988 1988 break;
1989 1989 default:
1990 1990 handled = UNHANDLED;
1991 1991 break;
1992 1992 }
1993 1993 return (handled);
1994 1994 }
1995 1995
1996 1996 static bool
1997 1997 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1998 1998 {
1999 1999
2000 2000 if (apic_access_virtualization(vmx, vcpuid) &&
2001 2001 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
2002 2002 return (true);
2003 2003 else
2004 2004 return (false);
2005 2005 }
2006 2006
2007 2007 static int
2008 2008 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2009 2009 {
2010 2010 uint64_t qual;
2011 2011 int access_type, offset, allowed;
2012 2012 struct vie *vie;
2013 2013
2014 2014 if (!apic_access_virtualization(vmx, vcpuid))
2015 2015 return (UNHANDLED);
2016 2016
2017 2017 qual = vmexit->u.vmx.exit_qualification;
2018 2018 access_type = APIC_ACCESS_TYPE(qual);
2019 2019 offset = APIC_ACCESS_OFFSET(qual);
2020 2020
2021 2021 allowed = 0;
2022 2022 if (access_type == 0) {
2023 2023 /*
2024 2024 * Read data access to the following registers is expected.
2025 2025 */
2026 2026 switch (offset) {
2027 2027 case APIC_OFFSET_APR:
2028 2028 case APIC_OFFSET_PPR:
2029 2029 case APIC_OFFSET_RRR:
2030 2030 case APIC_OFFSET_CMCI_LVT:
2031 2031 case APIC_OFFSET_TIMER_CCR:
2032 2032 allowed = 1;
2033 2033 break;
2034 2034 default:
2035 2035 break;
2036 2036 }
2037 2037 } else if (access_type == 1) {
2038 2038 /*
2039 2039 * Write data access to the following registers is expected.
2040 2040 */
2041 2041 switch (offset) {
2042 2042 case APIC_OFFSET_VER:
2043 2043 case APIC_OFFSET_APR:
2044 2044 case APIC_OFFSET_PPR:
2045 2045 case APIC_OFFSET_RRR:
2046 2046 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2047 2047 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2048 2048 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2049 2049 case APIC_OFFSET_CMCI_LVT:
2050 2050 case APIC_OFFSET_TIMER_CCR:
2051 2051 allowed = 1;
2052 2052 break;
2053 2053 default:
2054 2054 break;
2055 2055 }
2056 2056 }
2057 2057
2058 2058 if (allowed) {
2059 2059 vie = vm_vie_ctx(vmx->vm, vcpuid);
2060 2060 vmexit_mmio_emul(vmexit, vie, DEFAULT_APIC_BASE + offset,
2061 2061 VIE_INVALID_GLA);
2062 2062 }
2063 2063
2064 2064 /*
2065 2065 	 * Regardless of whether the APIC access is allowed, this handler
2066 2066 * always returns UNHANDLED:
2067 2067 * - if the access is allowed then it is handled by emulating the
2068 2068 * instruction that caused the VM-exit (outside the critical section)
2069 2069 * - if the access is not allowed then it will be converted to an
2070 2070 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2071 2071 */
2072 2072 return (UNHANDLED);
2073 2073 }
2074 2074
2075 2075 static enum task_switch_reason
2076 2076 vmx_task_switch_reason(uint64_t qual)
2077 2077 {
2078 2078 int reason;
2079 2079
2080 2080 reason = (qual >> 30) & 0x3;
2081 2081 switch (reason) {
2082 2082 case 0:
2083 2083 return (TSR_CALL);
2084 2084 case 1:
2085 2085 return (TSR_IRET);
2086 2086 case 2:
2087 2087 return (TSR_JMP);
2088 2088 case 3:
2089 2089 return (TSR_IDT_GATE);
2090 2090 default:
2091 2091 panic("%s: invalid reason %d", __func__, reason);
2092 2092 }
2093 2093 }
2094 2094
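/*
 * MSR accesses follow the architectural register convention: WRMSR takes
 * the 64-bit value in %edx:%eax for the MSR selected by %ecx, and RDMSR
 * returns the value split across %edx:%eax.  The helpers below route
 * local APIC MSRs to the vLAPIC emulation and everything else to the
 * VMX-specific MSR handlers.
 */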
2095 2095 static int
2096 2096 emulate_wrmsr(struct vmx *vmx, int vcpuid, uint_t num, uint64_t val)
2097 2097 {
2098 2098 int error;
2099 2099
2100 2100 if (lapic_msr(num))
2101 2101 error = lapic_wrmsr(vmx->vm, vcpuid, num, val);
2102 2102 else
2103 2103 error = vmx_wrmsr(vmx, vcpuid, num, val);
2104 2104
2105 2105 return (error);
2106 2106 }
2107 2107
2108 2108 static int
2109 2109 emulate_rdmsr(struct vmx *vmx, int vcpuid, uint_t num)
2110 2110 {
2111 2111 uint64_t result;
2112 2112 int error;
2113 2113
2114 2114 if (lapic_msr(num))
2115 2115 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result);
2116 2116 else
2117 2117 error = vmx_rdmsr(vmx, vcpuid, num, &result);
2118 2118
2119 2119 if (error == 0) {
2120 2120 vmx->ctx[vcpuid].guest_rax = (uint32_t)result;
2121 2121 vmx->ctx[vcpuid].guest_rdx = result >> 32;
2122 2122 }
2123 2123
2124 2124 return (error);
2125 2125 }
2126 2126
2127 2127 #ifndef __FreeBSD__
2128 2128 #define __predict_false(x) (x)
2129 2129 #endif
2130 2130
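/*
 * vmx_exit_process() returns HANDLED when an exit was fully dealt with in
 * the kernel (in which case the guest %rip is advanced past the exiting
 * instruction at the bottom of the function) and UNHANDLED when the exit
 * must be completed in userland.  Unhandled exits that populate no
 * specific exitcode are converted into a generic VM_EXITCODE_VMX exit.
 */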
2131 2131 static int
2132 2132 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2133 2133 {
2134 2134 int error, errcode, errcode_valid, handled;
2135 2135 struct vmxctx *vmxctx;
2136 2136 struct vie *vie;
2137 2137 struct vlapic *vlapic;
2138 2138 struct vm_task_switch *ts;
2139 2139 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info;
2140 2140 uint32_t intr_type, intr_vec, reason;
2141 2141 uint64_t exitintinfo, qual, gpa;
2142 2142
2143 2143 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2144 2144 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2145 2145
2146 2146 handled = UNHANDLED;
2147 2147 vmxctx = &vmx->ctx[vcpu];
2148 2148
2149 2149 qual = vmexit->u.vmx.exit_qualification;
2150 2150 reason = vmexit->u.vmx.exit_reason;
2151 2151 vmexit->exitcode = VM_EXITCODE_BOGUS;
2152 2152
2153 2153 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2154 2154 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
2155 2155
2156 2156 /*
2157 2157 * VM-entry failures during or after loading guest state.
2158 2158 *
2159 2159 * These VM-exits are uncommon but must be handled specially
2160 2160 * as most VM-exit fields are not populated as usual.
2161 2161 */
2162 2162 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2163 2163 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2164 2164 #ifdef __FreeBSD__
2165 2165 __asm __volatile("int $18");
2166 2166 #else
2167 2167 vmm_call_trap(T_MCE);
2168 2168 #endif
2169 2169 return (1);
2170 2170 }
2171 2171
2172 2172 /*
2173 2173 * VM exits that can be triggered during event delivery need to
2174 2174 * be handled specially by re-injecting the event if the IDT
2175 2175 * vectoring information field's valid bit is set.
2176 2176 *
2177 2177 * See "Information for VM Exits During Event Delivery" in Intel SDM
2178 2178 * for details.
2179 2179 */
2180 2180 idtvec_info = vmcs_idt_vectoring_info();
2181 2181 if (idtvec_info & VMCS_IDT_VEC_VALID) {
2182 2182 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2183 2183 exitintinfo = idtvec_info;
2184 2184 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2185 2185 idtvec_err = vmcs_idt_vectoring_err();
2186 2186 exitintinfo |= (uint64_t)idtvec_err << 32;
2187 2187 }
2188 2188 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2189 2189 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2190 2190 __func__, error));
2191 2191
2192 2192 /*
2193 2193 * If 'virtual NMIs' are being used and the VM-exit
2194 2194 * happened while injecting an NMI during the previous
2195 2195 * VM-entry, then clear "blocking by NMI" in the
2196 2196 * Guest Interruptibility-State so the NMI can be
2197 2197 * reinjected on the subsequent VM-entry.
2198 2198 *
2199 2199 * However, if the NMI was being delivered through a task
2200 2200 * gate, then the new task must start execution with NMIs
2201 2201 * blocked so don't clear NMI blocking in this case.
2202 2202 */
2203 2203 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2204 2204 if (intr_type == VMCS_INTR_T_NMI) {
2205 2205 if (reason != EXIT_REASON_TASK_SWITCH)
2206 2206 vmx_clear_nmi_blocking(vmx, vcpu);
2207 2207 else
2208 2208 vmx_assert_nmi_blocking(vmx, vcpu);
2209 2209 }
2210 2210
2211 2211 /*
2212 2212 * Update VM-entry instruction length if the event being
2213 2213 * delivered was a software interrupt or software exception.
2214 2214 */
2215 2215 if (intr_type == VMCS_INTR_T_SWINTR ||
2216 2216 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2217 2217 intr_type == VMCS_INTR_T_SWEXCEPTION) {
2218 2218 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2219 2219 }
2220 2220 }
2221 2221
2222 2222 switch (reason) {
2223 2223 case EXIT_REASON_TASK_SWITCH:
2224 2224 ts = &vmexit->u.task_switch;
2225 2225 ts->tsssel = qual & 0xffff;
2226 2226 ts->reason = vmx_task_switch_reason(qual);
2227 2227 ts->ext = 0;
2228 2228 ts->errcode_valid = 0;
2229 2229 vmx_paging_info(&ts->paging);
2230 2230 /*
2231 2231 * If the task switch was due to a CALL, JMP, IRET, software
2232 2232 * interrupt (INT n) or software exception (INT3, INTO),
2233 2233 * then the saved %rip references the instruction that caused
2234 2234 * the task switch. The instruction length field in the VMCS
2235 2235 * is valid in this case.
2236 2236 *
2237 2237 * In all other cases (e.g., NMI, hardware exception) the
2238 2238 * saved %rip is one that would have been saved in the old TSS
2239 2239 * had the task switch completed normally so the instruction
2240 2240 * length field is not needed in this case and is explicitly
2241 2241 * set to 0.
2242 2242 */
2243 2243 if (ts->reason == TSR_IDT_GATE) {
2244 2244 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2245 2245 ("invalid idtvec_info %x for IDT task switch",
2246 2246 idtvec_info));
2247 2247 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2248 2248 if (intr_type != VMCS_INTR_T_SWINTR &&
2249 2249 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2250 2250 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2251 2251 /* Task switch triggered by external event */
2252 2252 ts->ext = 1;
2253 2253 vmexit->inst_length = 0;
2254 2254 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2255 2255 ts->errcode_valid = 1;
2256 2256 ts->errcode = vmcs_idt_vectoring_err();
2257 2257 }
2258 2258 }
2259 2259 }
2260 2260 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2261 2261 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
2262 2262 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2263 2263 "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2264 2264 ts->ext ? "external" : "internal",
2265 2265 ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2266 2266 break;
2267 2267 case EXIT_REASON_CR_ACCESS:
2268 2268 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2269 2269 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
2270 2270 switch (qual & 0xf) {
2271 2271 case 0:
2272 2272 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2273 2273 break;
2274 2274 case 4:
2275 2275 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2276 2276 break;
2277 2277 case 8:
2278 2278 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2279 2279 break;
2280 2280 }
2281 2281 break;
2282 2282 case EXIT_REASON_RDMSR:
2283 2283 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2284 2284 ecx = vmxctx->guest_rcx;
2285 2285 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2286 2286 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx);
2287 2287 error = emulate_rdmsr(vmx, vcpu, ecx);
2288 2288 if (error == 0) {
2289 2289 handled = HANDLED;
2290 2290 } else if (error > 0) {
2291 2291 vmexit->exitcode = VM_EXITCODE_RDMSR;
2292 2292 vmexit->u.msr.code = ecx;
2293 2293 } else {
2294 2294 /* Return to userspace with a valid exitcode */
2295 2295 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2296 2296 ("emulate_rdmsr retu with bogus exitcode"));
2297 2297 }
2298 2298 break;
2299 2299 case EXIT_REASON_WRMSR:
2300 2300 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2301 2301 eax = vmxctx->guest_rax;
2302 2302 ecx = vmxctx->guest_rcx;
2303 2303 edx = vmxctx->guest_rdx;
2304 2304 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2305 2305 ecx, (uint64_t)edx << 32 | eax);
2306 2306 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx,
2307 2307 (uint64_t)edx << 32 | eax);
2308 2308 error = emulate_wrmsr(vmx, vcpu, ecx,
2309 2309 (uint64_t)edx << 32 | eax);
2310 2310 if (error == 0) {
2311 2311 handled = HANDLED;
2312 2312 } else if (error > 0) {
2313 2313 vmexit->exitcode = VM_EXITCODE_WRMSR;
2314 2314 vmexit->u.msr.code = ecx;
2315 2315 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2316 2316 } else {
2317 2317 /* Return to userspace with a valid exitcode */
2318 2318 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2319 2319 ("emulate_wrmsr retu with bogus exitcode"));
2320 2320 }
2321 2321 break;
2322 2322 case EXIT_REASON_HLT:
2323 2323 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2324 2324 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
2325 2325 vmexit->exitcode = VM_EXITCODE_HLT;
2326 2326 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2327 2327 break;
2328 2328 case EXIT_REASON_MTF:
2329 2329 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2330 2330 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
2331 2331 vmexit->exitcode = VM_EXITCODE_MTRAP;
2332 2332 vmexit->inst_length = 0;
2333 2333 break;
2334 2334 case EXIT_REASON_PAUSE:
2335 2335 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2336 2336 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
2337 2337 vmexit->exitcode = VM_EXITCODE_PAUSE;
2338 2338 break;
2339 2339 case EXIT_REASON_INTR_WINDOW:
2340 2340 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2341 2341 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
2342 2342 vmx_clear_int_window_exiting(vmx, vcpu);
2343 2343 return (1);
2344 2344 case EXIT_REASON_EXT_INTR:
2345 2345 /*
2346 2346 * External interrupts serve only to cause VM exits and allow
2347 2347 * the host interrupt handler to run.
2348 2348 *
2349 2349 * If this external interrupt triggers a virtual interrupt
2350 2350 * to a VM, then that state will be recorded by the
2351 2351 * host interrupt handler in the VM's softc. We will inject
2352 2352 * this virtual interrupt during the subsequent VM enter.
2353 2353 */
2354 2354 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2355 2355 SDT_PROBE4(vmm, vmx, exit, interrupt,
2356 2356 vmx, vcpu, vmexit, intr_info);
2357 2357
2358 2358 /*
2359 2359 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2360 2360 * This appears to be a bug in VMware Fusion?
2361 2361 */
2362 2362 if (!(intr_info & VMCS_INTR_VALID))
2363 2363 return (1);
2364 2364 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2365 2365 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2366 2366 ("VM exit interruption info invalid: %x", intr_info));
2367 2367 vmx_trigger_hostintr(intr_info & 0xff);
2368 2368
2369 2369 /*
2370 2370 		 * This is special. We want to treat this as a 'handled'
2371 2371 * VM-exit but not increment the instruction pointer.
2372 2372 */
2373 2373 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2374 2374 return (1);
2375 2375 case EXIT_REASON_NMI_WINDOW:
2376 2376 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
2377 2377 /* Exit to allow the pending virtual NMI to be injected */
2378 2378 if (vm_nmi_pending(vmx->vm, vcpu))
2379 2379 vmx_inject_nmi(vmx, vcpu);
2380 2380 vmx_clear_nmi_window_exiting(vmx, vcpu);
2381 2381 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2382 2382 return (1);
2383 2383 case EXIT_REASON_INOUT:
2384 2384 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2385 2385 vie = vm_vie_ctx(vmx->vm, vcpu);
2386 2386 vmexit_inout(vmexit, vie, qual, (uint32_t)vmxctx->guest_rax);
2387 2387 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
2388 2388 break;
2389 2389 case EXIT_REASON_CPUID:
2390 2390 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2391 2391 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
2392 2392 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2393 2393 break;
2394 2394 case EXIT_REASON_EXCEPTION:
2395 2395 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2396 2396 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2397 2397 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2398 2398 ("VM exit interruption info invalid: %x", intr_info));
2399 2399
2400 2400 intr_vec = intr_info & 0xff;
2401 2401 intr_type = intr_info & VMCS_INTR_T_MASK;
2402 2402
2403 2403 /*
2404 2404 * If Virtual NMIs control is 1 and the VM-exit is due to a
2405 2405 * fault encountered during the execution of IRET then we must
2406 2406 * restore the state of "virtual-NMI blocking" before resuming
2407 2407 * the guest.
2408 2408 *
2409 2409 * See "Resuming Guest Software after Handling an Exception".
2410 2410 * See "Information for VM Exits Due to Vectored Events".
2411 2411 */
2412 2412 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2413 2413 (intr_vec != IDT_DF) &&
2414 2414 (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2415 2415 vmx_restore_nmi_blocking(vmx, vcpu);
2416 2416
2417 2417 /*
2418 2418 * The NMI has already been handled in vmx_exit_handle_nmi().
2419 2419 */
2420 2420 if (intr_type == VMCS_INTR_T_NMI)
2421 2421 return (1);
2422 2422
2423 2423 /*
2424 2424 * Call the machine check handler by hand. Also don't reflect
2425 2425 * the machine check back into the guest.
2426 2426 */
2427 2427 if (intr_vec == IDT_MC) {
2428 2428 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2429 2429 #ifdef __FreeBSD__
2430 2430 __asm __volatile("int $18");
2431 2431 #else
2432 2432 vmm_call_trap(T_MCE);
2433 2433 #endif
2434 2434 return (1);
2435 2435 }
2436 2436
2437 2437 /*
2438 2438 * If the hypervisor has requested user exits for
2439 2439 * debug exceptions, bounce them out to userland.
2440 2440 */
2441 2441 if (intr_type == VMCS_INTR_T_SWEXCEPTION &&
2442 2442 intr_vec == IDT_BP &&
2443 2443 (vmx->cap[vcpu].set & (1 << VM_CAP_BPT_EXIT))) {
2444 2444 vmexit->exitcode = VM_EXITCODE_BPT;
2445 2445 vmexit->u.bpt.inst_length = vmexit->inst_length;
2446 2446 vmexit->inst_length = 0;
2447 2447 break;
2448 2448 }
2449 2449
2450 2450 if (intr_vec == IDT_PF) {
2451 2451 vmxctx->guest_cr2 = qual;
2452 2452 }
2453 2453
2454 2454 /*
2455 2455 * Software exceptions exhibit trap-like behavior. This in
2456 2456 * turn requires populating the VM-entry instruction length
2457 2457 * so that the %rip in the trap frame is past the INT3/INTO
2458 2458 * instruction.
2459 2459 */
2460 2460 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2461 2461 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2462 2462
2463 2463 /* Reflect all other exceptions back into the guest */
2464 2464 errcode_valid = errcode = 0;
2465 2465 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2466 2466 errcode_valid = 1;
2467 2467 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2468 2468 }
2469 2469 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%x into "
2470 2470 "the guest", intr_vec, errcode);
2471 2471 SDT_PROBE5(vmm, vmx, exit, exception,
2472 2472 vmx, vcpu, vmexit, intr_vec, errcode);
2473 2473 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2474 2474 errcode_valid, errcode, 0);
2475 2475 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2476 2476 __func__, error));
2477 2477 return (1);
2478 2478
2479 2479 case EXIT_REASON_EPT_FAULT:
2480 2480 /*
2481 2481 * If 'gpa' lies within the address space allocated to
2482 2482 		 * memory, then this must be a nested page fault; otherwise
2483 2483 * this must be an instruction that accesses MMIO space.
2484 2484 */
2485 2485 gpa = vmcs_gpa();
2486 2486 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2487 2487 apic_access_fault(vmx, vcpu, gpa)) {
2488 2488 vmexit->exitcode = VM_EXITCODE_PAGING;
2489 2489 vmexit->inst_length = 0;
2490 2490 vmexit->u.paging.gpa = gpa;
2491 2491 vmexit->u.paging.fault_type = ept_fault_type(qual);
2492 2492 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2493 2493 SDT_PROBE5(vmm, vmx, exit, nestedfault,
2494 2494 vmx, vcpu, vmexit, gpa, qual);
2495 2495 } else if (ept_emulation_fault(qual)) {
2496 2496 vie = vm_vie_ctx(vmx->vm, vcpu);
2497 2497 vmexit_mmio_emul(vmexit, vie, gpa, vmcs_gla());
2498 2498 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MMIO_EMUL, 1);
2499 2499 SDT_PROBE4(vmm, vmx, exit, mmiofault,
2500 2500 vmx, vcpu, vmexit, gpa);
2501 2501 }
2502 2502 /*
2503 2503 * If Virtual NMIs control is 1 and the VM-exit is due to an
2504 2504 * EPT fault during the execution of IRET then we must restore
2505 2505 * the state of "virtual-NMI blocking" before resuming.
2506 2506 *
2507 2507 * See description of "NMI unblocking due to IRET" in
2508 2508 * "Exit Qualification for EPT Violations".
2509 2509 */
2510 2510 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2511 2511 (qual & EXIT_QUAL_NMIUDTI) != 0)
2512 2512 vmx_restore_nmi_blocking(vmx, vcpu);
2513 2513 break;
2514 2514 case EXIT_REASON_VIRTUALIZED_EOI:
2515 2515 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2516 2516 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2517 2517 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
2518 2518 vmexit->inst_length = 0; /* trap-like */
2519 2519 break;
2520 2520 case EXIT_REASON_APIC_ACCESS:
2521 2521 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
2522 2522 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2523 2523 break;
2524 2524 case EXIT_REASON_APIC_WRITE:
2525 2525 /*
2526 2526 * APIC-write VM exit is trap-like so the %rip is already
2527 2527 * pointing to the next instruction.
2528 2528 */
2529 2529 vmexit->inst_length = 0;
2530 2530 vlapic = vm_lapic(vmx->vm, vcpu);
2531 2531 SDT_PROBE4(vmm, vmx, exit, apicwrite,
2532 2532 vmx, vcpu, vmexit, vlapic);
2533 2533 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2534 2534 break;
2535 2535 case EXIT_REASON_XSETBV:
2536 2536 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
2537 2537 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2538 2538 break;
2539 2539 case EXIT_REASON_MONITOR:
2540 2540 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
2541 2541 vmexit->exitcode = VM_EXITCODE_MONITOR;
2542 2542 break;
2543 2543 case EXIT_REASON_MWAIT:
2544 2544 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
2545 2545 vmexit->exitcode = VM_EXITCODE_MWAIT;
2546 2546 break;
2547 2547 case EXIT_REASON_TPR:
2548 2548 vlapic = vm_lapic(vmx->vm, vcpu);
2549 2549 vlapic_sync_tpr(vlapic);
2550 2550 vmexit->inst_length = 0;
2551 2551 handled = HANDLED;
2552 2552 break;
2553 2553 case EXIT_REASON_VMCALL:
2554 2554 case EXIT_REASON_VMCLEAR:
2555 2555 case EXIT_REASON_VMLAUNCH:
2556 2556 case EXIT_REASON_VMPTRLD:
2557 2557 case EXIT_REASON_VMPTRST:
2558 2558 case EXIT_REASON_VMREAD:
2559 2559 case EXIT_REASON_VMRESUME:
2560 2560 case EXIT_REASON_VMWRITE:
2561 2561 case EXIT_REASON_VMXOFF:
2562 2562 case EXIT_REASON_VMXON:
2563 2563 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
2564 2564 vmexit->exitcode = VM_EXITCODE_VMINSN;
2565 2565 break;
2566 2566 default:
2567 2567 SDT_PROBE4(vmm, vmx, exit, unknown,
2568 2568 vmx, vcpu, vmexit, reason);
2569 2569 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2570 2570 break;
2571 2571 }
2572 2572
2573 2573 if (handled) {
2574 2574 /*
2575 2575 * It is possible that control is returned to userland
2576 2576 * even though we were able to handle the VM exit in the
2577 2577 * kernel.
2578 2578 *
2579 2579 * In such a case we want to make sure that the userland
2580 2580 * restarts guest execution at the instruction *after*
2581 2581 * the one we just processed. Therefore we update the
2582 2582 * guest rip in the VMCS and in 'vmexit'.
2583 2583 */
2584 2584 vmexit->rip += vmexit->inst_length;
2585 2585 vmexit->inst_length = 0;
2586 2586 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2587 2587 } else {
2588 2588 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2589 2589 /*
2590 2590 * If this VM exit was not claimed by anybody then
2591 2591 * treat it as a generic VMX exit.
2592 2592 */
2593 2593 vmexit->exitcode = VM_EXITCODE_VMX;
2594 2594 vmexit->u.vmx.status = VM_SUCCESS;
2595 2595 vmexit->u.vmx.inst_type = 0;
2596 2596 vmexit->u.vmx.inst_error = 0;
2597 2597 } else {
2598 2598 /*
2599 2599 * The exitcode and collateral have been populated.
2600 2600 * The VM exit will be processed further in userland.
2601 2601 */
2602 2602 }
2603 2603 }
2604 2604
2605 2605 SDT_PROBE4(vmm, vmx, exit, return,
2606 2606 vmx, vcpu, vmexit, handled);
2607 2607 return (handled);
2608 2608 }
2609 2609
2610 2610 static void
2611 2611 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2612 2612 {
2613 2613
2614 2614 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2615 2615 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2616 2616 vmxctx->inst_fail_status));
2617 2617
2618 2618 vmexit->inst_length = 0;
2619 2619 vmexit->exitcode = VM_EXITCODE_VMX;
2620 2620 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2621 2621 vmexit->u.vmx.inst_error = vmcs_instruction_error();
2622 2622 vmexit->u.vmx.exit_reason = ~0;
2623 2623 vmexit->u.vmx.exit_qualification = ~0;
2624 2624
2625 2625 switch (rc) {
2626 2626 case VMX_VMRESUME_ERROR:
2627 2627 case VMX_VMLAUNCH_ERROR:
2628 2628 case VMX_INVEPT_ERROR:
2629 2629 #ifndef __FreeBSD__
2630 2630 case VMX_VMWRITE_ERROR:
2631 2631 #endif
2632 2632 vmexit->u.vmx.inst_type = rc;
2633 2633 break;
2634 2634 default:
2635 2635 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2636 2636 }
2637 2637 }
2638 2638
2639 2639 /*
2640 2640 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2641 2641 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2642 2642 * sufficient to simply vector to the NMI handler via a software interrupt.
2643 2643  * However, this must be done before maskable interrupts are enabled;
2644 2644  * otherwise, the "iret" issued by an interrupt handler will incorrectly
2645 2645 * clear NMI blocking.
2646 2646 */
2647 2647 static __inline void
2648 2648 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2649 2649 {
2650 2650 uint32_t intr_info;
2651 2651
2652 2652 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2653 2653
2654 2654 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2655 2655 return;
2656 2656
2657 2657 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2658 2658 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2659 2659 ("VM exit interruption info invalid: %x", intr_info));
2660 2660
2661 2661 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2662 2662 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2663 2663 "to NMI has invalid vector: %x", intr_info));
2664 2664 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2665 2665 #ifdef __FreeBSD__
2666 2666 __asm __volatile("int $2");
2667 2667 #else
2668 2668 vmm_call_trap(T_NMIFLT);
2669 2669 #endif
2670 2670 }
2671 2671 }
2672 2672
2673 2673 static __inline void
2674 2674 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2675 2675 {
2676 2676 uint64_t rflags;
2677 2677
2678 2678 /* Save host control debug registers. */
2679 2679 vmxctx->host_dr7 = rdr7();
2680 2680 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2681 2681
2682 2682 /*
2683 2683 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2684 2684 * exceptions in the host based on the guest DRx values. The
2685 2685 * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2686 2686 */
2687 2687 load_dr7(0);
2688 2688 wrmsr(MSR_DEBUGCTLMSR, 0);
2689 2689
2690 2690 /*
2691 2691 * Disable single stepping the kernel to avoid corrupting the
2692 2692 * guest DR6. A debugger might still be able to corrupt the
2693 2693 * guest DR6 by setting a breakpoint after this point and then
2694 2694 * single stepping.
2695 2695 */
2696 2696 rflags = read_rflags();
2697 2697 vmxctx->host_tf = rflags & PSL_T;
2698 2698 write_rflags(rflags & ~PSL_T);
2699 2699
2700 2700 /* Save host debug registers. */
2701 2701 vmxctx->host_dr0 = rdr0();
2702 2702 vmxctx->host_dr1 = rdr1();
2703 2703 vmxctx->host_dr2 = rdr2();
2704 2704 vmxctx->host_dr3 = rdr3();
2705 2705 vmxctx->host_dr6 = rdr6();
2706 2706
2707 2707 /* Restore guest debug registers. */
2708 2708 load_dr0(vmxctx->guest_dr0);
2709 2709 load_dr1(vmxctx->guest_dr1);
2710 2710 load_dr2(vmxctx->guest_dr2);
2711 2711 load_dr3(vmxctx->guest_dr3);
2712 2712 load_dr6(vmxctx->guest_dr6);
2713 2713 }
2714 2714
2715 2715 static __inline void
2716 2716 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2717 2717 {
2718 2718
2719 2719 /* Save guest debug registers. */
2720 2720 vmxctx->guest_dr0 = rdr0();
2721 2721 vmxctx->guest_dr1 = rdr1();
2722 2722 vmxctx->guest_dr2 = rdr2();
2723 2723 vmxctx->guest_dr3 = rdr3();
2724 2724 vmxctx->guest_dr6 = rdr6();
2725 2725
2726 2726 /*
2727 2727 * Restore host debug registers. Restore DR7, DEBUGCTL, and
2728 2728 * PSL_T last.
2729 2729 */
2730 2730 load_dr0(vmxctx->host_dr0);
2731 2731 load_dr1(vmxctx->host_dr1);
2732 2732 load_dr2(vmxctx->host_dr2);
2733 2733 load_dr3(vmxctx->host_dr3);
2734 2734 load_dr6(vmxctx->host_dr6);
2735 2735 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2736 2736 load_dr7(vmxctx->host_dr7);
2737 2737 write_rflags(read_rflags() | vmxctx->host_tf);
2738 2738 }
2739 2739
2740 2740 static int
2741 -vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap,
2742 - struct vm_eventinfo *evinfo)
2741 +vmx_run(void *arg, int vcpu, uint64_t rip, pmap_t pmap)
2743 2742 {
2744 2743 int rc, handled, launched;
2745 2744 struct vmx *vmx;
2746 2745 struct vm *vm;
2747 2746 struct vmxctx *vmxctx;
2748 2747 uintptr_t vmcs_pa;
2749 2748 struct vm_exit *vmexit;
2750 2749 struct vlapic *vlapic;
2751 2750 uint32_t exit_reason;
2752 2751 #ifdef __FreeBSD__
2753 2752 struct region_descriptor gdtr, idtr;
2754 2753 uint16_t ldt_sel;
2755 2754 #endif
2756 2755 bool tpr_shadow_active;
2757 2756
2758 2757 vmx = arg;
2759 2758 vm = vmx->vm;
2760 2759 vmcs_pa = vmx->vmcs_pa[vcpu];
2761 2760 vmxctx = &vmx->ctx[vcpu];
2762 2761 vlapic = vm_lapic(vm, vcpu);
2763 2762 vmexit = vm_exitinfo(vm, vcpu);
2764 2763 launched = 0;
2765 2764 tpr_shadow_active = vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW) &&
2766 2765 !vmx_cap_en(vmx, VMX_CAP_APICV) &&
2767 2766 (vmx->cap[vcpu].proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0;
2768 2767
2769 2768 KASSERT(vmxctx->pmap == pmap,
2770 2769 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2771 2770
2772 2771 vmx_msr_guest_enter(vmx, vcpu);
2773 2772
2774 2773 vmcs_load(vmcs_pa);
2775 2774
2776 2775 #ifndef __FreeBSD__
2777 2776 VERIFY(vmx->vmcs_state[vcpu] == VS_NONE && curthread->t_preempt != 0);
2778 2777 vmx->vmcs_state[vcpu] = VS_LOADED;
2779 2778 #endif
2780 2779
2781 2780 /*
2782 2781 * XXX
2783 2782 	 * We do this every time because we may set up the virtual machine
2784 2783 * from a different process than the one that actually runs it.
2785 2784 *
2786 2785 * If the life of a virtual machine was spent entirely in the context
2787 2786 * of a single process we could do this once in vmx_vminit().
2788 2787 */
2789 2788 vmcs_write(VMCS_HOST_CR3, rcr3());
2790 2789
2791 2790 vmcs_write(VMCS_GUEST_RIP, rip);
2792 2791 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2793 2792 do {
2794 2793 enum event_inject_state inject_state;
2795 2794
2796 2795 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2797 2796 "%lx/%lx", __func__, vmcs_guest_rip(), rip));
2798 2797
2799 2798 handled = UNHANDLED;
2800 2799
2801 2800 /*
2802 2801 * Perform initial event/exception/interrupt injection before
2803 2802 * host CPU interrupts are disabled.
2804 2803 */
2805 2804 inject_state = vmx_inject_events(vmx, vcpu, rip);
2806 2805
2807 2806 /*
2808 2807 * Interrupts are disabled from this point on until the
2809 2808 * guest starts executing. This is done for the following
2810 2809 * reasons:
2811 2810 *
2812 2811 * If an AST is asserted on this thread after the check below,
2813 2812 * then the IPI_AST notification will not be lost, because it
2814 2813 * will cause a VM exit due to external interrupt as soon as
2815 2814 * the guest state is loaded.
2816 2815 *
2817 2816 * A posted interrupt after vmx_inject_vlapic() will not be
2818 2817 * "lost" because it will be held pending in the host APIC
2819 2818 * because interrupts are disabled. The pending interrupt will
2820 2819 * be recognized as soon as the guest state is loaded.
2821 2820 *
2822 2821 * The same reasoning applies to the IPI generated by
2823 2822 * pmap_invalidate_ept().
2824 2823 */
2825 2824 disable_intr();
2826 2825
2827 2826 /*
2828 2827 * If not precluded by existing events, inject any interrupt
2829 2828 * pending on the vLAPIC. As a lock-less operation, it is safe
2830 2829 * (and prudent) to perform with host CPU interrupts disabled.
2831 2830 */
2832 2831 if (inject_state == EIS_CAN_INJECT) {
2833 2832 inject_state = vmx_inject_vlapic(vmx, vcpu, vlapic);
2834 2833 }
2835 2834
2836 2835 /*
2837 - * Check for vcpu suspension after injecting events because
2838 - * vmx_inject_events() can suspend the vcpu due to a
2839 - * triple fault.
2836 + * Check for vCPU bail-out conditions. This must be done after
2837 + * vmx_inject_events() to detect a triple-fault condition.
2840 2838 */
2841 - if (vcpu_suspended(evinfo)) {
2839 + if (vcpu_entry_bailout_checks(vmx->vm, vcpu, rip)) {
2842 2840 enable_intr();
2843 - vm_exit_suspended(vmx->vm, vcpu, rip);
2844 2841 break;
2845 2842 }
2846 2843
2847 - if (vcpu_runblocked(evinfo)) {
2844 + if (vcpu_run_state_pending(vm, vcpu)) {
2848 2845 enable_intr();
2849 - vm_exit_runblock(vmx->vm, vcpu, rip);
2846 + vm_exit_run_state(vmx->vm, vcpu, rip);
2850 2847 break;
2851 2848 }
2852 2849
2853 - if (vcpu_reqidle(evinfo)) {
2854 - enable_intr();
2855 - vm_exit_reqidle(vmx->vm, vcpu, rip);
2856 - break;
2857 - }
2858 -
2859 - if (vcpu_should_yield(vm, vcpu)) {
2860 - enable_intr();
2861 - vm_exit_astpending(vmx->vm, vcpu, rip);
2862 - vmx_astpending_trace(vmx, vcpu, rip);
2863 - handled = HANDLED;
2864 - break;
2865 - }
2866 -
2867 - if (vcpu_debugged(vm, vcpu)) {
2868 - enable_intr();
2869 - vm_exit_debug(vmx->vm, vcpu, rip);
2870 - break;
2871 - }
2872 -
2873 2850 /*
2874 2851 * If subsequent activity queued events which require injection
2875 2852 * handling, take another lap to handle them.
2876 2853 */
2877 2854 if (vmx_inject_recheck(vmx, vcpu, inject_state)) {
2878 2855 enable_intr();
2879 2856 handled = HANDLED;
2880 2857 continue;
2881 2858 }
2882 2859
2883 2860 #ifndef __FreeBSD__
2884 2861 if ((rc = smt_acquire()) != 1) {
2885 2862 enable_intr();
2886 2863 vmexit->rip = rip;
2887 2864 vmexit->inst_length = 0;
2888 2865 if (rc == -1) {
2889 2866 vmexit->exitcode = VM_EXITCODE_HT;
2890 2867 } else {
2891 2868 vmexit->exitcode = VM_EXITCODE_BOGUS;
2892 2869 handled = HANDLED;
2893 2870 }
2894 2871 break;
2895 2872 }
2896 2873
2897 2874 /*
2898 2875 * If this thread has gone off-cpu due to mutex operations
2899 2876 * during vmx_run, the VMCS will have been unloaded, forcing a
2900 2877 * re-VMLAUNCH as opposed to VMRESUME.
2901 2878 */
2902 2879 launched = (vmx->vmcs_state[vcpu] & VS_LAUNCHED) != 0;
2903 2880 /*
2904 2881 * Restoration of the GDT limit is taken care of by
2905 2882 * vmx_savectx(). Since the maximum practical index for the
2906 2883 * IDT is 255, restoring its limits from the post-VMX-exit
2907 2884 * default of 0xffff is not a concern.
2908 2885 *
2909 2886 * Only 64-bit hypervisor callers are allowed, which forgoes
2910 2887 * the need to restore any LDT descriptor. Toss an error to
2911 2888 * anyone attempting to break that rule.
2912 2889 */
2913 2890 if (curproc->p_model != DATAMODEL_LP64) {
2914 2891 smt_release();
2915 2892 enable_intr();
2916 2893 bzero(vmexit, sizeof (*vmexit));
2917 2894 vmexit->rip = rip;
2918 2895 vmexit->exitcode = VM_EXITCODE_VMX;
2919 2896 vmexit->u.vmx.status = VM_FAIL_INVALID;
2920 2897 handled = UNHANDLED;
2921 2898 break;
2922 2899 }
2923 2900 #else
2924 2901 /*
2925 2902 * VM exits restore the base address but not the
2926 2903 * limits of GDTR and IDTR. The VMCS only stores the
2927 2904 * base address, so VM exits set the limits to 0xffff.
2928 2905 * Save and restore the full GDTR and IDTR to restore
2929 2906 * the limits.
2930 2907 *
2931 2908 * The VMCS does not save the LDTR at all, and VM
2932 2909 * exits clear LDTR as if a NULL selector were loaded.
2933 2910 * The userspace hypervisor probably doesn't use a
2934 2911 * LDT, but save and restore it to be safe.
2935 2912 */
2936 2913 sgdt(&gdtr);
2937 2914 sidt(&idtr);
2938 2915 ldt_sel = sldt();
2939 2916 #endif
2940 2917
2941 2918 if (tpr_shadow_active) {
2942 2919 vmx_tpr_shadow_enter(vlapic);
2943 2920 }
2944 2921
2945 2922 vmx_run_trace(vmx, vcpu);
2946 2923 vmx_dr_enter_guest(vmxctx);
2947 2924 rc = vmx_enter_guest(vmxctx, vmx, launched);
2948 2925 vmx_dr_leave_guest(vmxctx);
2949 2926
2950 2927 #ifndef __FreeBSD__
2951 2928 vmx->vmcs_state[vcpu] |= VS_LAUNCHED;
2952 2929 smt_release();
2953 2930 #else
2954 2931 bare_lgdt(&gdtr);
2955 2932 lidt(&idtr);
2956 2933 lldt(ldt_sel);
2957 2934 #endif
2958 2935
2959 2936 if (tpr_shadow_active) {
2960 2937 vmx_tpr_shadow_exit(vlapic);
2961 2938 }
2962 2939
2963 2940 /* Collect some information for VM exit processing */
2964 2941 vmexit->rip = rip = vmcs_guest_rip();
2965 2942 vmexit->inst_length = vmexit_instruction_length();
2966 2943 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2967 2944 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2968 2945
2969 2946 /* Update 'nextrip' */
2970 2947 vmx->state[vcpu].nextrip = rip;
2971 2948
2972 2949 if (rc == VMX_GUEST_VMEXIT) {
2973 2950 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2974 2951 enable_intr();
2975 2952 handled = vmx_exit_process(vmx, vcpu, vmexit);
2976 2953 } else {
2977 2954 enable_intr();
2978 2955 vmx_exit_inst_error(vmxctx, rc, vmexit);
2979 2956 }
2980 2957 #ifdef __FreeBSD__
2981 2958 launched = 1;
2982 2959 #endif
2983 2960 DTRACE_PROBE3(vmm__vexit, int, vcpu, uint64_t, rip,
2984 2961 uint32_t, exit_reason);
2985 2962 rip = vmexit->rip;
2986 2963 } while (handled);
2987 2964
2988 - /*
2989 - * If a VM exit has been handled then the exitcode must be BOGUS
2990 - * If a VM exit is not handled then the exitcode must not be BOGUS
2991 - */
2992 - if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2993 - (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2994 - panic("Mismatch between handled (%d) and exitcode (%d)",
2995 - handled, vmexit->exitcode);
2965 + /* If a VM exit has been handled then the exitcode must be BOGUS */
2966 + if (handled && vmexit->exitcode != VM_EXITCODE_BOGUS) {
2967 + panic("Non-BOGUS exitcode (%d) unexpected for handled VM exit",
2968 + vmexit->exitcode);
2996 2969 }
2997 2970
2998 - if (!handled)
2999 - vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
3000 -
3001 2971 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
3002 2972 vmexit->exitcode);
3003 2973
3004 2974 vmcs_clear(vmcs_pa);
3005 2975 vmx_msr_guest_exit(vmx, vcpu);
3006 2976
3007 2977 #ifndef __FreeBSD__
3008 2978 	VERIFY(vmx->vmcs_state[vcpu] != VS_NONE && curthread->t_preempt != 0);
3009 2979 vmx->vmcs_state[vcpu] = VS_NONE;
3010 2980 #endif
3011 2981
3012 2982 return (0);
3013 2983 }
3014 2984
3015 2985 static void
3016 2986 vmx_vmcleanup(void *arg)
3017 2987 {
3018 2988 int i;
3019 2989 struct vmx *vmx = arg;
3020 2990 uint16_t maxcpus;
3021 2991
3022 2992 if (apic_access_virtualization(vmx, 0))
3023 2993 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3024 2994
3025 2995 maxcpus = vm_get_maxcpus(vmx->vm);
3026 2996 for (i = 0; i < maxcpus; i++)
3027 2997 vpid_free(vmx->state[i].vpid);
3028 2998
3029 2999 free(vmx, M_VMX);
3030 3000 }
3031 3001
3032 3002 static uint64_t *
3033 3003 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
3034 3004 {
3035 3005 switch (reg) {
3036 3006 case VM_REG_GUEST_RAX:
3037 3007 return (&vmxctx->guest_rax);
3038 3008 case VM_REG_GUEST_RBX:
3039 3009 return (&vmxctx->guest_rbx);
3040 3010 case VM_REG_GUEST_RCX:
3041 3011 return (&vmxctx->guest_rcx);
3042 3012 case VM_REG_GUEST_RDX:
3043 3013 return (&vmxctx->guest_rdx);
3044 3014 case VM_REG_GUEST_RSI:
3045 3015 return (&vmxctx->guest_rsi);
3046 3016 case VM_REG_GUEST_RDI:
3047 3017 return (&vmxctx->guest_rdi);
3048 3018 case VM_REG_GUEST_RBP:
3049 3019 return (&vmxctx->guest_rbp);
3050 3020 case VM_REG_GUEST_R8:
3051 3021 return (&vmxctx->guest_r8);
3052 3022 case VM_REG_GUEST_R9:
3053 3023 return (&vmxctx->guest_r9);
3054 3024 case VM_REG_GUEST_R10:
3055 3025 return (&vmxctx->guest_r10);
3056 3026 case VM_REG_GUEST_R11:
3057 3027 return (&vmxctx->guest_r11);
3058 3028 case VM_REG_GUEST_R12:
3059 3029 return (&vmxctx->guest_r12);
3060 3030 case VM_REG_GUEST_R13:
3061 3031 return (&vmxctx->guest_r13);
3062 3032 case VM_REG_GUEST_R14:
3063 3033 return (&vmxctx->guest_r14);
3064 3034 case VM_REG_GUEST_R15:
3065 3035 return (&vmxctx->guest_r15);
3066 3036 case VM_REG_GUEST_CR2:
3067 3037 return (&vmxctx->guest_cr2);
3068 3038 case VM_REG_GUEST_DR0:
3069 3039 return (&vmxctx->guest_dr0);
3070 3040 case VM_REG_GUEST_DR1:
3071 3041 return (&vmxctx->guest_dr1);
3072 3042 case VM_REG_GUEST_DR2:
3073 3043 return (&vmxctx->guest_dr2);
3074 3044 case VM_REG_GUEST_DR3:
3075 3045 return (&vmxctx->guest_dr3);
3076 3046 case VM_REG_GUEST_DR6:
3077 3047 return (&vmxctx->guest_dr6);
3078 3048 default:
3079 3049 break;
3080 3050 }
3081 3051 return (NULL);
3082 3052 }
3083 3053
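/*
 * Guest register access from outside the vcpu thread: general-purpose
 * registers (plus %cr2 and the debug registers) live in the
 * software-maintained vmxctx and can be read or written directly, while
 * everything else resides in the VMCS and requires the VMCS to be loaded
 * on the current CPU (vmcs_load/vmcs_clear) when the vcpu is not already
 * running there.
 */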
3084 3054 static int
3085 3055 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
3086 3056 {
3087 3057 int running, hostcpu, err;
3088 3058 struct vmx *vmx = arg;
3089 3059 uint64_t *regp;
3090 3060
3091 3061 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3092 3062 if (running && hostcpu != curcpu)
3093 3063 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
3094 3064
3095 3065 /* VMCS access not required for ctx reads */
3096 3066 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
3097 3067 *retval = *regp;
3098 3068 return (0);
3099 3069 }
3100 3070
3101 3071 if (!running) {
3102 3072 vmcs_load(vmx->vmcs_pa[vcpu]);
3103 3073 }
3104 3074
3105 3075 err = EINVAL;
3106 3076 if (reg == VM_REG_GUEST_INTR_SHADOW) {
3107 3077 uint64_t gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
3108 3078 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
3109 3079 err = 0;
3110 3080 } else {
3111 3081 uint32_t encoding;
3112 3082
3113 3083 encoding = vmcs_field_encoding(reg);
3114 3084 if (encoding != VMCS_INVALID_ENCODING) {
3115 3085 *retval = vmcs_read(encoding);
3116 3086 err = 0;
3117 3087 }
3118 3088 }
3119 3089
3120 3090 if (!running) {
3121 3091 vmcs_clear(vmx->vmcs_pa[vcpu]);
3122 3092 }
3123 3093
3124 3094 return (err);
3125 3095 }
3126 3096
3127 3097 static int
3128 3098 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
3129 3099 {
3130 3100 int running, hostcpu, error;
3131 3101 struct vmx *vmx = arg;
3132 3102 uint64_t *regp;
3133 3103
3134 3104 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3135 3105 if (running && hostcpu != curcpu)
3136 3106 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
3137 3107
3138 3108 /* VMCS access not required for ctx writes */
3139 3109 if ((regp = vmxctx_regptr(&vmx->ctx[vcpu], reg)) != NULL) {
3140 3110 *regp = val;
3141 3111 return (0);
3142 3112 }
3143 3113
3144 3114 if (!running) {
3145 3115 vmcs_load(vmx->vmcs_pa[vcpu]);
3146 3116 }
3147 3117
3148 3118 if (reg == VM_REG_GUEST_INTR_SHADOW) {
3149 3119 if (val != 0) {
3150 3120 /*
3151 3121 * Forcing the vcpu into an interrupt shadow is not
3152 3122 * presently supported.
3153 3123 */
3154 3124 error = EINVAL;
3155 3125 } else {
3156 3126 uint64_t gi;
3157 3127
3158 3128 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
3159 3129 gi &= ~HWINTR_BLOCKING;
3160 3130 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
3161 3131 error = 0;
3162 3132 }
3163 3133 } else {
3164 3134 uint32_t encoding;
3165 3135
3166 3136 error = 0;
3167 3137 encoding = vmcs_field_encoding(reg);
3168 3138 switch (encoding) {
3169 3139 case VMCS_GUEST_IA32_EFER:
3170 3140 /*
3171 3141 * If the "load EFER" VM-entry control is 1 then the
3172 3142 * value of EFER.LMA must be identical to "IA-32e mode
3173 3143 * guest" bit in the VM-entry control.
3174 3144 */
3175 3145 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0) {
3176 3146 uint64_t ctls;
3177 3147
3178 3148 ctls = vmcs_read(VMCS_ENTRY_CTLS);
3179 3149 if (val & EFER_LMA) {
3180 3150 ctls |= VM_ENTRY_GUEST_LMA;
3181 3151 } else {
3182 3152 ctls &= ~VM_ENTRY_GUEST_LMA;
3183 3153 }
3184 3154 vmcs_write(VMCS_ENTRY_CTLS, ctls);
3185 3155 }
3186 3156 vmcs_write(encoding, val);
3187 3157 break;
3188 3158 case VMCS_GUEST_CR0:
3189 3159 /*
3190 3160 * The guest is not allowed to modify certain bits in
3191 3161 * %cr0 and %cr4. To maintain the illusion of full
3192 3162 * control, they have shadow versions which contain the
3193 3163 * guest-perceived (via reads from the register) values
3194 3164 * as opposed to the guest-effective values.
3195 3165 *
3196 3166 * This is detailed in the SDM: Vol. 3 Ch. 24.6.6.
3197 3167 */
3198 3168 vmcs_write(VMCS_CR0_SHADOW, val);
3199 3169 vmcs_write(encoding, vmx_fix_cr0(val));
3200 3170 break;
3201 3171 case VMCS_GUEST_CR4:
3202 3172 /* See above for detail on %cr4 shadowing */
3203 3173 vmcs_write(VMCS_CR4_SHADOW, val);
3204 3174 vmcs_write(encoding, vmx_fix_cr4(val));
3205 3175 break;
3206 3176 case VMCS_GUEST_CR3:
3207 3177 vmcs_write(encoding, val);
3208 3178 /*
3209 3179 * Invalidate the guest vcpu's TLB mappings to emulate
3210 3180 * the behavior of updating %cr3.
3211 3181 *
3212 3182 * XXX the processor retains global mappings when %cr3
3213 3183 * is updated but vmx_invvpid() does not.
3214 3184 */
3215 3185 vmx_invvpid(vmx, vcpu, vmx->ctx[vcpu].pmap, running);
3216 3186 break;
3217 3187 case VMCS_INVALID_ENCODING:
3218 3188 error = EINVAL;
3219 3189 break;
3220 3190 default:
3221 3191 vmcs_write(encoding, val);
3222 3192 break;
3223 3193 }
3224 3194 }
3225 3195
3226 3196 if (!running) {
3227 3197 vmcs_clear(vmx->vmcs_pa[vcpu]);
3228 3198 }
3229 3199
3230 3200 return (error);
3231 3201 }
3232 3202
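/*
 * Editor's sketch (not part of the change under review): the %cr0/%cr4
 * handling above writes the guest's raw value into the read shadow and a
 * "fixed" value into the real control register.  A minimal illustration of
 * that fix-up, assuming vmx_fix_cr0() follows the usual ones-mask/zeros-mask
 * pattern (the parameter names here are hypothetical):
 */
static inline uint64_t
example_fix_ctrl_reg(uint64_t guest_val, uint64_t ones_mask,
    uint64_t zeros_mask)
{
	/* Host-required bits are forced on, host-forbidden bits forced off. */
	return ((guest_val | ones_mask) & ~zeros_mask);
}
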
3233 3203 static int
3234 3204 vmx_getdesc(void *arg, int vcpu, int seg, struct seg_desc *desc)
3235 3205 {
3236 3206 int hostcpu, running;
3237 3207 struct vmx *vmx = arg;
3238 3208 uint32_t base, limit, access;
3239 3209
3240 3210 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3241 3211 if (running && hostcpu != curcpu)
3242 3212 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3243 3213
3244 3214 if (!running) {
3245 3215 vmcs_load(vmx->vmcs_pa[vcpu]);
3246 3216 }
3247 3217
3248 3218 vmcs_seg_desc_encoding(seg, &base, &limit, &access);
3249 3219 desc->base = vmcs_read(base);
3250 3220 desc->limit = vmcs_read(limit);
3251 3221 if (access != VMCS_INVALID_ENCODING) {
3252 3222 desc->access = vmcs_read(access);
3253 3223 } else {
3254 3224 desc->access = 0;
3255 3225 }
3256 3226
3257 3227 if (!running) {
3258 3228 vmcs_clear(vmx->vmcs_pa[vcpu]);
3259 3229 }
3260 3230 return (0);
3261 3231 }
3262 3232
3263 3233 static int
3264 -vmx_setdesc(void *arg, int vcpu, int seg, struct seg_desc *desc)
3234 +vmx_setdesc(void *arg, int vcpu, int seg, const struct seg_desc *desc)
3265 3235 {
3266 3236 int hostcpu, running;
3267 3237 struct vmx *vmx = arg;
3268 3238 uint32_t base, limit, access;
3269 3239
3270 3240 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3271 3241 if (running && hostcpu != curcpu)
3272 3242 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3273 3243
3274 3244 if (!running) {
3275 3245 vmcs_load(vmx->vmcs_pa[vcpu]);
3276 3246 }
3277 3247
3278 3248 vmcs_seg_desc_encoding(seg, &base, &limit, &access);
3279 3249 vmcs_write(base, desc->base);
3280 3250 vmcs_write(limit, desc->limit);
3281 3251 if (access != VMCS_INVALID_ENCODING) {
3282 3252 vmcs_write(access, desc->access);
3283 3253 }
3284 3254
3285 3255 if (!running) {
3286 3256 vmcs_clear(vmx->vmcs_pa[vcpu]);
3287 3257 }
3288 3258 return (0);
3289 3259 }
3290 3260
3291 3261 static int
3292 3262 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3293 3263 {
3294 3264 struct vmx *vmx = arg;
3295 3265 int vcap;
3296 3266 int ret;
3297 3267
3298 3268 ret = ENOENT;
3299 3269
3300 3270 vcap = vmx->cap[vcpu].set;
3301 3271
3302 3272 switch (type) {
3303 3273 case VM_CAP_HALT_EXIT:
3304 3274 if (cap_halt_exit)
3305 3275 ret = 0;
3306 3276 break;
3307 3277 case VM_CAP_PAUSE_EXIT:
3308 3278 if (cap_pause_exit)
3309 3279 ret = 0;
3310 3280 break;
3311 3281 case VM_CAP_MTRAP_EXIT:
3312 3282 if (cap_monitor_trap)
3313 3283 ret = 0;
3314 3284 break;
3315 3285 case VM_CAP_ENABLE_INVPCID:
3316 3286 if (cap_invpcid)
3317 3287 ret = 0;
3318 3288 break;
3319 3289 case VM_CAP_BPT_EXIT:
3320 3290 ret = 0;
3321 3291 break;
3322 3292 default:
3323 3293 break;
3324 3294 }
3325 3295
3326 3296 if (ret == 0)
3327 3297 *retval = (vcap & (1 << type)) ? 1 : 0;
3328 3298
3329 3299 return (ret);
3330 3300 }
3331 3301
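/*
 * Editor's note (illustration only): vmx->cap[vcpu].set is a plain bitmask
 * indexed by the VM_CAP_* value, so once a capability type is known to be
 * supported, reading its current state back reduces to a single bit test:
 *
 *	value = (vmx->cap[vcpu].set & (1 << type)) ? 1 : 0;
 */
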
3332 3302 static int
3333 3303 vmx_setcap(void *arg, int vcpu, int type, int val)
3334 3304 {
3335 3305 struct vmx *vmx = arg;
3336 3306 uint32_t baseval, reg, flag;
3337 3307 uint32_t *pptr;
3338 3308 int error;
3339 3309
3340 3310 error = ENOENT;
3341 3311 pptr = NULL;
3342 3312
3343 3313 switch (type) {
3344 3314 case VM_CAP_HALT_EXIT:
3345 3315 if (cap_halt_exit) {
3346 3316 error = 0;
3347 3317 pptr = &vmx->cap[vcpu].proc_ctls;
3348 3318 baseval = *pptr;
3349 3319 flag = PROCBASED_HLT_EXITING;
3350 3320 reg = VMCS_PRI_PROC_BASED_CTLS;
3351 3321 }
3352 3322 break;
3353 3323 case VM_CAP_MTRAP_EXIT:
3354 3324 if (cap_monitor_trap) {
3355 3325 error = 0;
3356 3326 pptr = &vmx->cap[vcpu].proc_ctls;
3357 3327 baseval = *pptr;
3358 3328 flag = PROCBASED_MTF;
3359 3329 reg = VMCS_PRI_PROC_BASED_CTLS;
3360 3330 }
3361 3331 break;
3362 3332 case VM_CAP_PAUSE_EXIT:
3363 3333 if (cap_pause_exit) {
3364 3334 error = 0;
3365 3335 pptr = &vmx->cap[vcpu].proc_ctls;
3366 3336 baseval = *pptr;
3367 3337 flag = PROCBASED_PAUSE_EXITING;
3368 3338 reg = VMCS_PRI_PROC_BASED_CTLS;
3369 3339 }
3370 3340 break;
3371 3341 case VM_CAP_ENABLE_INVPCID:
3372 3342 if (cap_invpcid) {
3373 3343 error = 0;
3374 3344 pptr = &vmx->cap[vcpu].proc_ctls2;
3375 3345 baseval = *pptr;
3376 3346 flag = PROCBASED2_ENABLE_INVPCID;
3377 3347 reg = VMCS_SEC_PROC_BASED_CTLS;
3378 3348 }
3379 3349 break;
3380 3350 case VM_CAP_BPT_EXIT:
3381 3351 error = 0;
3382 3352
3383 3353 /* Don't change the bitmap if we are tracing all exceptions. */
3384 3354 if (vmx->cap[vcpu].exc_bitmap != 0xffffffff) {
3385 3355 pptr = &vmx->cap[vcpu].exc_bitmap;
3386 3356 baseval = *pptr;
3387 3357 flag = (1 << IDT_BP);
3388 3358 reg = VMCS_EXCEPTION_BITMAP;
3389 3359 }
3390 3360 break;
3391 3361 default:
3392 3362 break;
3393 3363 }
3394 3364
3395 3365 if (error != 0) {
3396 3366 return (error);
3397 3367 }
3398 3368
3399 3369 if (pptr != NULL) {
3400 3370 if (val) {
3401 3371 baseval |= flag;
3402 3372 } else {
3403 3373 baseval &= ~flag;
3404 3374 }
3405 3375 vmcs_load(vmx->vmcs_pa[vcpu]);
3406 3376 vmcs_write(reg, baseval);
3407 3377 vmcs_clear(vmx->vmcs_pa[vcpu]);
3408 3378
3409 3379 /*
3410 3380 * Update optional stored flags, and record
3411 3381 * setting
3412 3382 */
3413 3383 *pptr = baseval;
3414 3384 }
3415 3385
3416 3386 if (val) {
3417 3387 vmx->cap[vcpu].set |= (1 << type);
3418 3388 } else {
3419 3389 vmx->cap[vcpu].set &= ~(1 << type);
3420 3390 }
3421 3391
3422 3392 return (0);
3423 3393 }
3424 3394
3425 3395 struct vlapic_vtx {
3426 3396 struct vlapic vlapic;
3427 3397
3428 3398 /* Align to the nearest cacheline */
3429 3399 uint8_t _pad[64 - (sizeof (struct vlapic) % 64)];
3430 3400
3431 3401 /* TMR handling state for posted interrupts */
3432 3402 uint32_t tmr_active[8];
3433 3403 uint32_t pending_level[8];
3434 3404 uint32_t pending_edge[8];
3435 3405
3436 3406 struct pir_desc *pir_desc;
3437 3407 struct vmx *vmx;
3438 3408 uint_t pending_prio;
3439 3409 boolean_t tmr_sync;
3440 3410 };
3441 3411
3442 3412 CTASSERT((offsetof(struct vlapic_vtx, tmr_active) & 63) == 0);
3443 3413
3444 3414 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4))
3445 3415
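/*
 * Editor's worked example: a vector's priority class is its upper nibble,
 * so VPR_PRIO_BIT(0x3d) == (1 << (0x3d >> 4)) == (1 << 3).  The
 * 'pending_prio' field above is thus a small set of the priority classes
 * observed while the PIR 'pending' bit stayed asserted.
 */
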
3446 3416 static vcpu_notify_t
3447 3417 vmx_apicv_set_ready(struct vlapic *vlapic, int vector, bool level)
3448 3418 {
3449 3419 struct vlapic_vtx *vlapic_vtx;
3450 3420 struct pir_desc *pir_desc;
3451 3421 uint32_t mask, tmrval;
3452 3422 int idx;
3453 3423 vcpu_notify_t notify = VCPU_NOTIFY_NONE;
3454 3424
3455 3425 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3456 3426 pir_desc = vlapic_vtx->pir_desc;
3457 3427 idx = vector / 32;
3458 3428 mask = 1UL << (vector % 32);
3459 3429
3460 3430 /*
3461 3431 * If the currently asserted TMRs do not match the state requested by
3462 3432 * the incoming interrupt, an exit will be required to reconcile those
3463 3433 * bits in the APIC page. This will keep the vLAPIC behavior in line
3464 3434 * with the architecturally defined expectations.
3465 3435 *
3466 3436 * If actors of mixed types (edge and level) are racing against the same
3467 3437 * vector (toggling its TMR bit back and forth), the results could be
3468 3438 * inconsistent. Such circumstances are considered a rare edge case and
3469 3439 * are never expected to be found in the wild.
3470 3440 */
3471 3441 tmrval = atomic_load_acq_int(&vlapic_vtx->tmr_active[idx]);
3472 3442 if (!level) {
3473 3443 if ((tmrval & mask) != 0) {
3474 3444 /* Edge-triggered interrupt needs TMR de-asserted */
3475 3445 atomic_set_int(&vlapic_vtx->pending_edge[idx], mask);
3476 3446 atomic_store_rel_long(&pir_desc->pending, 1);
3477 3447 return (VCPU_NOTIFY_EXIT);
3478 3448 }
3479 3449 } else {
3480 3450 if ((tmrval & mask) == 0) {
3481 3451 /* Level-triggered interrupt needs TMR asserted */
3482 3452 atomic_set_int(&vlapic_vtx->pending_level[idx], mask);
3483 3453 atomic_store_rel_long(&pir_desc->pending, 1);
3484 3454 return (VCPU_NOTIFY_EXIT);
3485 3455 }
3486 3456 }
3487 3457
3488 3458 /*
3489 3459 * If the interrupt request does not require manipulation of the TMRs
3490 3460 * for delivery, set it in the PIR descriptor. It cannot be inserted into
3491 3461 * the APIC page while the vCPU might be running.
3492 3462 */
3493 3463 atomic_set_int(&pir_desc->pir[idx], mask);
3494 3464
3495 3465 /*
3496 3466 * A notification is required whenever the 'pending' bit makes a
3497 3467 * transition from 0->1.
3498 3468 *
3499 3469 * Even if the 'pending' bit is already asserted, notification about
3500 3470 * the incoming interrupt may still be necessary. For example, if a
3501 3471 * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3502 3472 * the 0->1 'pending' transition with a notification, but the vCPU
3503 3473 * would ignore the interrupt for the time being. The same vCPU would
3504 3474 * need to then be notified if a high-priority interrupt arrived which
3505 3475 * satisfied the PPR.
3506 3476 *
3507 3477 * The priorities of interrupts injected while 'pending' is asserted
3508 3478 * are tracked in a custom bitfield 'pending_prio'. Should the
3509 3479 * to-be-injected interrupt exceed the priorities already present, the
3510 3480 * notification is sent. The priorities recorded in 'pending_prio' are
3511 3481 * cleared whenever the 'pending' bit makes another 0->1 transition.
3512 3482 */
3513 3483 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3514 3484 notify = VCPU_NOTIFY_APIC;
3515 3485 vlapic_vtx->pending_prio = 0;
3516 3486 } else {
3517 3487 const uint_t old_prio = vlapic_vtx->pending_prio;
3518 3488 const uint_t prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3519 3489
3520 3490 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3521 3491 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3522 3492 notify = VCPU_NOTIFY_APIC;
3523 3493 }
3524 3494 }
3525 3495
3526 3496 return (notify);
3527 3497 }
3528 3498
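/*
 * Editor's worked example of the notification rules above (vector values
 * are hypothetical): vector 0x35 arriving while 'pending' is clear wins the
 * 0->1 cmpset, clears pending_prio, and notifies.  A later vector 0x38 in
 * the same priority class (3) records (1 << 3) and notifies once more, but
 * a further class-3 vector such as 0x33 is coalesced without a fresh
 * notification.  A higher-class vector such as 0x91 ((1 << 9) > (1 << 3))
 * triggers another notification so a HLTed vCPU can re-evaluate its PPR.
 */
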
3529 3499 static void
3530 3500 vmx_apicv_accepted(struct vlapic *vlapic, int vector)
3531 3501 {
3532 3502 /*
3533 3503 * When APICv is enabled for an instance, the traditional interrupt
3534 3504 * injection method (populating ENTRY_INTR_INFO in the VMCS) is not
3535 3505 * used and the CPU does the heavy lifting of virtual interrupt
3536 3506 * delivery. For that reason vmx_intr_accepted() should never be called
3537 3507 * when APICv is enabled.
3538 3508 */
3539 3509 panic("vmx_intr_accepted: not expected to be called");
3540 3510 }
3541 3511
3542 3512 static void
3543 3513 vmx_apicv_sync_tmr(struct vlapic *vlapic)
3544 3514 {
3545 3515 struct vlapic_vtx *vlapic_vtx;
3546 3516 const uint32_t *tmrs;
3547 3517
3548 3518 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3549 3519 tmrs = &vlapic_vtx->tmr_active[0];
3550 3520
3551 3521 if (!vlapic_vtx->tmr_sync) {
3552 3522 return;
3553 3523 }
3554 3524
3555 3525 vmcs_write(VMCS_EOI_EXIT0, ((uint64_t)tmrs[1] << 32) | tmrs[0]);
3556 3526 vmcs_write(VMCS_EOI_EXIT1, ((uint64_t)tmrs[3] << 32) | tmrs[2]);
3557 3527 vmcs_write(VMCS_EOI_EXIT2, ((uint64_t)tmrs[5] << 32) | tmrs[4]);
3558 3528 vmcs_write(VMCS_EOI_EXIT3, ((uint64_t)tmrs[7] << 32) | tmrs[6]);
3559 3529 vlapic_vtx->tmr_sync = B_FALSE;
3560 3530 }
3561 3531
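/*
 * Editor's note (illustration only): each VMCS_EOI_EXITn field covers 64
 * vectors, so the 256-bit TMR image is packed low-word-first into four
 * 64-bit VMCS writes, e.g. for vectors 0-63:
 *
 *	eoi_exit0 = ((uint64_t)tmrs[1] << 32) | tmrs[0];
 */
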
3562 3532 static void
3563 3533 vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
3564 3534 {
3565 3535 struct vmx *vmx;
3566 3536 uint32_t proc_ctls;
3567 3537 int vcpuid;
3568 3538
3569 3539 vcpuid = vlapic->vcpuid;
3570 3540 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3571 3541
3572 3542 proc_ctls = vmx->cap[vcpuid].proc_ctls;
3573 3543 proc_ctls &= ~PROCBASED_USE_TPR_SHADOW;
3574 3544 proc_ctls |= PROCBASED_CR8_LOAD_EXITING;
3575 3545 proc_ctls |= PROCBASED_CR8_STORE_EXITING;
3576 3546 vmx->cap[vcpuid].proc_ctls = proc_ctls;
3577 3547
3578 3548 vmcs_load(vmx->vmcs_pa[vcpuid]);
3579 3549 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, proc_ctls);
3580 3550 vmcs_clear(vmx->vmcs_pa[vcpuid]);
3581 3551 }
3582 3552
3583 3553 static void
3584 3554 vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
3585 3555 {
3586 3556 struct vmx *vmx;
3587 3557 uint32_t proc_ctls2;
3588 3558 int vcpuid, error;
3589 3559
3590 3560 vcpuid = vlapic->vcpuid;
3591 3561 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3592 3562
3593 3563 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3594 3564 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3595 3565 ("%s: invalid proc_ctls2 %x", __func__, proc_ctls2));
3596 3566
3597 3567 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3598 3568 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3599 3569 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3600 3570
3601 3571 vmcs_load(vmx->vmcs_pa[vcpuid]);
3602 3572 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3603 3573 vmcs_clear(vmx->vmcs_pa[vcpuid]);
3604 3574
3605 3575 if (vlapic->vcpuid == 0) {
3606 3576 /*
3607 3577 * The nested page table mappings are shared by all vcpus
3608 3578 * so unmap the APIC access page just once.
3609 3579 */
3610 3580 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3611 3581 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3612 3582 __func__, error));
3613 3583
3614 3584 /*
3615 3585 * The MSR bitmap is shared by all vcpus so modify it only
3616 3586 * once in the context of vcpu 0.
3617 3587 */
3618 3588 error = vmx_allow_x2apic_msrs(vmx);
3619 3589 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3620 3590 __func__, error));
3621 3591 }
3622 3592 }
3623 3593
3624 3594 static void
3625 3595 vmx_apicv_notify(struct vlapic *vlapic, int hostcpu)
3626 3596 {
3627 3597 psm_send_pir_ipi(hostcpu);
3628 3598 }
3629 3599
3630 3600 static void
3631 3601 vmx_apicv_sync(struct vlapic *vlapic)
3632 3602 {
3633 3603 struct vlapic_vtx *vlapic_vtx;
3634 3604 struct pir_desc *pir_desc;
3635 3605 struct LAPIC *lapic;
3636 3606 uint_t i;
3637 3607
3638 3608 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3639 3609 pir_desc = vlapic_vtx->pir_desc;
3640 3610 lapic = vlapic->apic_page;
3641 3611
3642 3612 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3643 3613 return;
3644 3614 }
3645 3615
3646 3616 vlapic_vtx->pending_prio = 0;
3647 3617
3648 3618 /* Make sure the invalid (0-15) vectors are not set */
3649 3619 ASSERT0(vlapic_vtx->pending_level[0] & 0xffff);
3650 3620 ASSERT0(vlapic_vtx->pending_edge[0] & 0xffff);
3651 3621 ASSERT0(pir_desc->pir[0] & 0xffff);
3652 3622
3653 3623 for (i = 0; i <= 7; i++) {
3654 3624 uint32_t *tmrp = &lapic->tmr0 + (i * 4);
3655 3625 uint32_t *irrp = &lapic->irr0 + (i * 4);
3656 3626
3657 3627 const uint32_t pending_level =
3658 3628 atomic_readandclear_int(&vlapic_vtx->pending_level[i]);
3659 3629 const uint32_t pending_edge =
3660 3630 atomic_readandclear_int(&vlapic_vtx->pending_edge[i]);
3661 3631 const uint32_t pending_inject =
3662 3632 atomic_readandclear_int(&pir_desc->pir[i]);
3663 3633
3664 3634 if (pending_level != 0) {
3665 3635 /*
3666 3636 * Level-triggered interrupts assert their corresponding
3667 3637 * bit in the TMR when queued in IRR.
3668 3638 */
3669 3639 *tmrp |= pending_level;
3670 3640 *irrp |= pending_level;
3671 3641 }
3672 3642 if (pending_edge != 0) {
3673 3643 /*
3674 3644 * When queuing an edge-triggered interrupt in IRR, the
3675 3645 * corresponding bit in the TMR is cleared.
3676 3646 */
3677 3647 *tmrp &= ~pending_edge;
3678 3648 *irrp |= pending_edge;
3679 3649 }
3680 3650 if (pending_inject != 0) {
3681 3651 /*
3682 3652 * Interrupts which do not require a change to the TMR
3683 3653 * (because it already matches the necessary state) can
3684 3654 * simply be queued in IRR.
3685 3655 */
3686 3656 *irrp |= pending_inject;
3687 3657 }
3688 3658
3689 3659 if (*tmrp != vlapic_vtx->tmr_active[i]) {
3690 3660 /* Check if VMX EOI triggers require updating. */
3691 3661 vlapic_vtx->tmr_active[i] = *tmrp;
3692 3662 vlapic_vtx->tmr_sync = B_TRUE;
3693 3663 }
3694 3664 }
3695 3665 }
3696 3666
3697 3667 static void
3698 3668 vmx_tpr_shadow_enter(struct vlapic *vlapic)
3699 3669 {
3700 3670 /*
3701 3671 * When TPR shadowing is enabled, VMX will initiate a guest exit if its
3702 3672 * TPR falls below a threshold priority. That threshold is set to the
3703 3673 * current TPR priority, since guest interrupt status should be
3704 3674 * re-evaluated if its TPR is set lower.
3705 3675 */
3706 3676 vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
3707 3677 }
3708 3678
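/*
 * Editor's worked example (hypothetical values): if the guest last set
 * CR8 = 5, the threshold above is written as 5 on entry; if the guest then
 * lowers its TPR below that class, the TPR-below-threshold exit lets the
 * VMM re-evaluate and inject interrupts the old TPR had been masking.
 */
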
3709 3679 static void
3710 3680 vmx_tpr_shadow_exit(struct vlapic *vlapic)
3711 3681 {
3712 3682 /*
3713 3683 * Unlike full APICv, where changes to the TPR are reflected in the PPR,
3714 3684 * with TPR shadowing, that duty is relegated to the VMM. Upon exit,
3715 3685 * the PPR is updated to reflect any change in the TPR here.
3716 3686 */
3717 3687 vlapic_sync_tpr(vlapic);
3718 3688 }
3719 3689
3720 3690 static struct vlapic *
3721 3691 vmx_vlapic_init(void *arg, int vcpuid)
3722 3692 {
3723 3693 struct vmx *vmx;
3724 3694 struct vlapic *vlapic;
3725 3695 struct vlapic_vtx *vlapic_vtx;
3726 3696
3727 3697 vmx = arg;
3728 3698
3729 3699 vlapic = malloc(sizeof (struct vlapic_vtx), M_VLAPIC,
3730 3700 M_WAITOK | M_ZERO);
3731 3701 vlapic->vm = vmx->vm;
3732 3702 vlapic->vcpuid = vcpuid;
3733 3703 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3734 3704
3735 3705 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3736 3706 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3737 3707 vlapic_vtx->vmx = vmx;
3738 3708
3739 3709 if (vmx_cap_en(vmx, VMX_CAP_TPR_SHADOW)) {
3740 3710 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
3741 3711 }
3742 3712 if (vmx_cap_en(vmx, VMX_CAP_APICV)) {
3743 3713 vlapic->ops.set_intr_ready = vmx_apicv_set_ready;
3744 3714 vlapic->ops.sync_state = vmx_apicv_sync;
3745 3715 vlapic->ops.intr_accepted = vmx_apicv_accepted;
3746 3716 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_vid;
3747 3717
3748 3718 if (vmx_cap_en(vmx, VMX_CAP_APICV_PIR)) {
3749 3719 vlapic->ops.post_intr = vmx_apicv_notify;
3750 3720 }
3751 3721 }
3752 3722
3753 3723 vlapic_init(vlapic);
3754 3724
3755 3725 return (vlapic);
3756 3726 }
3757 3727
3758 3728 static void
3759 3729 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3760 3730 {
3761 3731
3762 3732 vlapic_cleanup(vlapic);
3763 3733 free(vlapic, M_VLAPIC);
3764 3734 }
3765 3735
3766 3736 #ifndef __FreeBSD__
3767 3737 static void
3768 3738 vmx_savectx(void *arg, int vcpu)
3769 3739 {
3770 3740 struct vmx *vmx = arg;
3771 3741
3772 3742 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
3773 3743 vmcs_clear(vmx->vmcs_pa[vcpu]);
3774 3744 vmx_msr_guest_exit(vmx, vcpu);
3775 3745 /*
3776 3746 * Having VMCLEARed the VMCS, it can no longer be re-entered
3777 3747 * with VMRESUME, but must be VMLAUNCHed again.
3778 3748 */
3779 3749 vmx->vmcs_state[vcpu] &= ~VS_LAUNCHED;
3780 3750 }
3781 3751
3782 3752 reset_gdtr_limit();
3783 3753 }
3784 3754
3785 3755 static void
3786 3756 vmx_restorectx(void *arg, int vcpu)
3787 3757 {
3788 3758 struct vmx *vmx = arg;
3789 3759
3790 3760 ASSERT0(vmx->vmcs_state[vcpu] & VS_LAUNCHED);
3791 3761
3792 3762 if ((vmx->vmcs_state[vcpu] & VS_LOADED) != 0) {
3793 3763 vmx_msr_guest_enter(vmx, vcpu);
3794 3764 vmcs_load(vmx->vmcs_pa[vcpu]);
3795 3765 }
3796 3766 }
3797 3767 #endif /* __FreeBSD__ */
3798 3768
3799 3769 struct vmm_ops vmm_ops_intel = {
3800 3770 .init = vmx_init,
3801 3771 .cleanup = vmx_cleanup,
3802 3772 .resume = vmx_restore,
3803 3773 .vminit = vmx_vminit,
3804 3774 .vmrun = vmx_run,
3805 3775 .vmcleanup = vmx_vmcleanup,
3806 3776 .vmgetreg = vmx_getreg,
3807 3777 .vmsetreg = vmx_setreg,
3808 3778 .vmgetdesc = vmx_getdesc,
3809 3779 .vmsetdesc = vmx_setdesc,
3810 3780 .vmgetcap = vmx_getcap,
3811 3781 .vmsetcap = vmx_setcap,
3812 3782 .vmspace_alloc = ept_vmspace_alloc,
3813 3783 .vmspace_free = ept_vmspace_free,
3814 3784 .vlapic_init = vmx_vlapic_init,
3815 3785 .vlapic_cleanup = vmx_vlapic_cleanup,
3816 3786
3817 3787 #ifndef __FreeBSD__
3818 3788 .vmsavectx = vmx_savectx,
3819 3789 .vmrestorectx = vmx_restorectx,
3820 3790 #endif
3821 3791 };
3822 3792
3823 3793 #ifndef __FreeBSD__
3824 3794 /* Side-effect free HW validation derived from checks in vmx_init. */
3825 3795 int
3826 3796 vmx_x86_supported(const char **msg)
3827 3797 {
3828 3798 int error;
3829 3799 uint32_t tmp;
3830 3800
3831 3801 ASSERT(msg != NULL);
3832 3802
3833 3803 /* Check support for primary processor-based VM-execution controls */
3834 3804 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
3835 3805 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_CTLS_ONE_SETTING,
3836 3806 PROCBASED_CTLS_ZERO_SETTING, &tmp);
3837 3807 if (error) {
3838 3808 *msg = "processor does not support desired primary "
3839 3809 "processor-based controls";
3840 3810 return (error);
3841 3811 }
3842 3812
3843 3813 /* Check support for secondary processor-based VM-execution controls */
3844 3814 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
3845 3815 MSR_VMX_PROCBASED_CTLS2, PROCBASED_CTLS2_ONE_SETTING,
3846 3816 PROCBASED_CTLS2_ZERO_SETTING, &tmp);
3847 3817 if (error) {
3848 3818 *msg = "processor does not support desired secondary "
3849 3819 "processor-based controls";
3850 3820 return (error);
3851 3821 }
3852 3822
3853 3823 /* Check support for pin-based VM-execution controls */
3854 3824 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
3855 3825 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_CTLS_ONE_SETTING,
3856 3826 PINBASED_CTLS_ZERO_SETTING, &tmp);
3857 3827 if (error) {
3858 3828 *msg = "processor does not support desired pin-based controls";
3859 3829 return (error);
3860 3830 }
3861 3831
3862 3832 /* Check support for VM-exit controls */
3863 3833 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
3864 3834 VM_EXIT_CTLS_ONE_SETTING, VM_EXIT_CTLS_ZERO_SETTING, &tmp);
3865 3835 if (error) {
3866 3836 *msg = "processor does not support desired exit controls";
3867 3837 return (error);
3868 3838 }
3869 3839
3870 3840 /* Check support for VM-entry controls */
3871 3841 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
3872 3842 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING, &tmp);
3873 3843 if (error) {
3874 3844 *msg = "processor does not support desired entry controls";
3875 3845 return (error);
3876 3846 }
3877 3847
3878 3848 /* Unrestricted guest is nominally optional, but not for us. */
3879 3849 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
3880 3850 PROCBASED2_UNRESTRICTED_GUEST, 0, &tmp);
3881 3851 if (error) {
3882 3852 *msg = "processor does not support desired unrestricted guest "
3883 3853 "controls";
3884 3854 return (error);
3885 3855 }
3886 3856
3887 3857 return (0);
3888 3858 }
3889 3859 #endif
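
/*
 * Editor's sketch of a hypothetical caller (names invented for
 * illustration): vmx_x86_supported() is side-effect free, so it can be run
 * before committing to VMX as the backend, e.g.:
 *
 *	const char *msg;
 *	if (vmx_x86_supported(&msg) != 0)
 *		cmn_err(CE_WARN, "VMX not usable: %s", msg);
 */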