1 /*-
   2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
   3  *
   4  * Copyright (c) 2011 NetApp, Inc.
   5  * All rights reserved.
   6  *
   7  * Redistribution and use in source and binary forms, with or without
   8  * modification, are permitted provided that the following conditions
   9  * are met:
  10  * 1. Redistributions of source code must retain the above copyright
  11  *    notice, this list of conditions and the following disclaimer.
  12  * 2. Redistributions in binary form must reproduce the above copyright
  13  *    notice, this list of conditions and the following disclaimer in the
  14  *    documentation and/or other materials provided with the distribution.
  15  *
  16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
  17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
  20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  26  * SUCH DAMAGE.
  27  *
  28  * $FreeBSD$
  29  */
  30 /*
  31  * This file and its contents are supplied under the terms of the
  32  * Common Development and Distribution License ("CDDL"), version 1.0.
  33  * You may only use this file in accordance with the terms of version
  34  * 1.0 of the CDDL.
  35  *
  36  * A full copy of the text of the CDDL should have accompanied this
  37  * source.  A copy of the CDDL is also available via the Internet at
  38  * http://www.illumos.org/license/CDDL.
  39  *
  40  * Copyright 2015 Pluribus Networks Inc.
  41  * Copyright 2019 Joyent, Inc.
  42  * Copyright 2020 Oxide Computer Company
  43  */
  44 
  45 #include <sys/cdefs.h>
  46 __FBSDID("$FreeBSD$");
  47 
  48 #include <sys/param.h>
  49 #include <sys/sysctl.h>
  50 #include <sys/ioctl.h>
  51 #ifdef  __FreeBSD__
  52 #include <sys/linker.h>
  53 #endif
  54 #include <sys/mman.h>
  55 #include <sys/module.h>
  56 #include <sys/_iovec.h>
  57 #include <sys/cpuset.h>
  58 
  59 #include <x86/segments.h>
  60 #include <machine/specialreg.h>
  61 
  62 #include <errno.h>
  63 #include <stdio.h>
  64 #include <stdlib.h>
  65 #include <assert.h>
  66 #include <string.h>
  67 #include <fcntl.h>
  68 #include <unistd.h>
  69 
  70 #include <libutil.h>
  71 
  72 #include <machine/vmm.h>
  73 #include <machine/vmm_dev.h>
  74 
  75 #include "vmmapi.h"
  76 
/* Size constants for guest memory accounting. */
#define MB      (1024 * 1024UL)
#define GB      (1024 * 1024 * 1024UL)

#ifndef __FreeBSD__
/* shim to no-op for now */
#define MAP_NOCORE              0
#define MAP_ALIGNED_SUPER       0

/* Rely on PROT_NONE for guard purposes */
#define MAP_GUARD               (MAP_PRIVATE | MAP_ANON | MAP_NORESERVE)
#endif

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define VM_MMAP_GUARD_SIZE      (4 * MB)

/* Shorthand protection masks used for host mappings of guest memory. */
#define PROT_RW         (PROT_READ | PROT_WRITE)
#define PROT_ALL        (PROT_READ | PROT_WRITE | PROT_EXEC)
  98 
/*
 * Per-VM handle returned by vm_open()/used by every other call in this
 * library. One instance per open VM device.
 */
struct vmctx {
	int	fd;		/* fd for /dev/vmm/<name>; -1 until opened */
	uint32_t lowmem_limit;	/* highest gpa usable for the lowmem segment */
	int	memflags;	/* VM_MEM_F_* flags applied to new mappings */
	size_t	lowmem;		/* bytes of guest memory below lowmem_limit */
	size_t	highmem;	/* bytes of guest memory mapped at/above 4GB */
	char	*baseaddr;	/* host VA corresponding to guest physical 0 */
	char	*name;		/* VM name; points into this same allocation */
};
 108 
#ifdef	__FreeBSD__
/* FreeBSD creates/destroys VMs through the hw.vmm sysctl interface. */
#define CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
#else
/* illumos creates/destroys VMs through ioctls on the VMM control device. */
#define CREATE(x)	vm_do_ctl(VMM_CREATE_VM, (x))
#define DESTROY(x)	vm_do_ctl(VMM_DESTROY_VM, (x))

/*
 * Issue a single command ('cmd' with argument 'name') against the VMM
 * control device. Returns 0 on success, -1 with errno set on failure.
 */
static int
vm_do_ctl(int cmd, const char *name)
{
	int ctl_fd;

	ctl_fd = open(VMM_CTL_DEV, O_EXCL | O_RDWR);
	if (ctl_fd < 0) {
		return (-1);
	}

	if (ioctl(ctl_fd, cmd, name) == -1) {
		int err = errno;

		/* Do not lose ioctl errno through the close(2) */
		(void) close(ctl_fd);
		errno = err;
		return (-1);
	}
	(void) close(ctl_fd);

	return (0);
}
#endif
 139 
 140 static int
 141 vm_device_open(const char *name)
 142 {
 143         int fd, len;
 144         char *vmfile;
 145 
 146         len = strlen("/dev/vmm/") + strlen(name) + 1;
 147         vmfile = malloc(len);
 148         assert(vmfile != NULL);
 149         snprintf(vmfile, len, "/dev/vmm/%s", name);
 150 
 151         /* Open the device file */
 152         fd = open(vmfile, O_RDWR, 0);
 153 
 154         free(vmfile);
 155         return (fd);
 156 }
 157 
/*
 * Create a new VM named 'name'.
 * Returns 0 on success, -1 (with errno set) on failure.
 */
int
vm_create(const char *name)
{
#ifdef __FreeBSD__
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
#endif
	return (CREATE((char *)name));
}
 168 
 169 struct vmctx *
 170 vm_open(const char *name)
 171 {
 172         struct vmctx *vm;
 173 
 174         vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
 175         assert(vm != NULL);
 176 
 177         vm->fd = -1;
 178         vm->memflags = 0;
 179         vm->lowmem_limit = 3 * GB;
 180         vm->name = (char *)(vm + 1);
 181         strcpy(vm->name, name);
 182 
 183         if ((vm->fd = vm_device_open(vm->name)) < 0)
 184                 goto err;
 185 
 186         return (vm);
 187 err:
 188 #ifdef __FreeBSD__
 189         vm_destroy(vm);
 190 #else
 191         /*
 192          * As libvmmapi is used by other programs to query and control bhyve
 193          * VMs, destroying a VM just because the open failed isn't useful. We
 194          * have to free what we have allocated, though.
 195          */
 196         free(vm);
 197 #endif
 198         return (NULL);
 199 }
 200 
#ifndef __FreeBSD__
/*
 * Release the handle for 'vm' without destroying the VM itself.
 * The VM must have been successfully opened (fd >= 0).
 */
void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);
	assert(vm->fd >= 0);

	(void) close(vm->fd);

	free(vm);
}
#endif
 213 
 214 void
 215 vm_destroy(struct vmctx *vm)
 216 {
 217         assert(vm != NULL);
 218 
 219         if (vm->fd >= 0)
 220                 close(vm->fd);
 221         DESTROY(vm->name);
 222 
 223         free(vm);
 224 }
 225 
 226 int
 227 vm_parse_memsize(const char *optarg, size_t *ret_memsize)
 228 {
 229         char *endptr;
 230         size_t optval;
 231         int error;
 232 
 233         optval = strtoul(optarg, &endptr, 0);
 234         if (*optarg != '\0' && *endptr == '\0') {
 235                 /*
 236                  * For the sake of backward compatibility if the memory size
 237                  * specified on the command line is less than a megabyte then
 238                  * it is interpreted as being in units of MB.
 239                  */
 240                 if (optval < MB)
 241                         optval *= MB;
 242                 *ret_memsize = optval;
 243                 error = 0;
 244         } else
 245                 error = expand_number(optarg, ret_memsize);
 246 
 247         return (error);
 248 }
 249 
/* Return the highest guest physical address usable for lowmem. */
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}
 256 
/* Set the highest guest physical address usable for lowmem. */
void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}
 263 
/* Set VM_MEM_F_* flags applied to subsequently created mappings. */
void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}
 270 
/* Return the VM_MEM_F_* flags currently in effect. */
int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}
 277 
 278 /*
 279  * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 280  */
 281 int
 282 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
 283     size_t len, int prot)
 284 {
 285         struct vm_memmap memmap;
 286         int error, flags;
 287 
 288         memmap.gpa = gpa;
 289         memmap.segid = segid;
 290         memmap.segoff = off;
 291         memmap.len = len;
 292         memmap.prot = prot;
 293         memmap.flags = 0;
 294 
 295         if (ctx->memflags & VM_MEM_F_WIRED)
 296                 memmap.flags |= VM_MEMMAP_F_WIRED;
 297 
 298         /*
 299          * If this mapping already exists then don't create it again. This
 300          * is the common case for SYSMEM mappings created by bhyveload(8).
 301          */
 302         error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
 303         if (error == 0 && gpa == memmap.gpa) {
 304                 if (segid != memmap.segid || off != memmap.segoff ||
 305                     prot != memmap.prot || flags != memmap.flags) {
 306                         errno = EEXIST;
 307                         return (-1);
 308                 } else {
 309                         return (0);
 310                 }
 311         }
 312 
 313         error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
 314         return (error);
 315 }
 316 
 317 int
 318 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
 319     vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
 320 {
 321         struct vm_memmap memmap;
 322         int error;
 323 
 324         bzero(&memmap, sizeof(struct vm_memmap));
 325         memmap.gpa = *gpa;
 326         error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
 327         if (error == 0) {
 328                 *gpa = memmap.gpa;
 329                 *segid = memmap.segid;
 330                 *segoff = memmap.segoff;
 331                 *len = memmap.len;
 332                 *prot = memmap.prot;
 333                 *flags = memmap.flags;
 334         }
 335         return (error);
 336 }
 337 
 338 /*
 339  * Return 0 if the segments are identical and non-zero otherwise.
 340  *
 341  * This is slightly complicated by the fact that only device memory segments
 342  * are named.
 343  */
 344 static int
 345 cmpseg(size_t len, const char *str, size_t len2, const char *str2)
 346 {
 347 
 348         if (len == len2) {
 349                 if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
 350                         return (0);
 351         }
 352         return (-1);
 353 }
 354 
/*
 * Create memory segment 'segid' of 'len' bytes, optionally named 'name'.
 * If a segment with that id already exists it must match exactly
 * (EINVAL otherwise); ENAMETOOLONG if 'name' does not fit the kernel's
 * buffer. Returns 0 on success, -1 with errno set on failure.
 */
static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	/* 'memseg' is used as an out buffer here: len/name are filled in. */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	/* A non-zero length means the segment already exists. */
	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	/* Segment does not exist yet; reinitialize 'memseg' as the request. */
	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}
 395 
 396 int
 397 vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
 398     size_t bufsize)
 399 {
 400         struct vm_memseg memseg;
 401         size_t n;
 402         int error;
 403 
 404         memseg.segid = segid;
 405         error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
 406         if (error == 0) {
 407                 *lenp = memseg.len;
 408                 n = strlcpy(namebuf, memseg.name, bufsize);
 409                 if (n >= bufsize) {
 410                         errno = ENAMETOOLONG;
 411                         error = -1;
 412                 }
 413         }
 414         return (error);
 415 }
 416 
/*
 * Map 'len' bytes of an allocated memory segment at guest physical address
 * 'gpa' and mirror it into the host address space at 'base + gpa'.
 * Returns 0 on success, non-zero/-1 on failure.
 */
static int
#ifdef __FreeBSD__
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
#else
setup_memory_segment(struct vmctx *ctx, int segid, vm_paddr_t gpa, size_t len,
    char *base)
#endif
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
#ifdef __FreeBSD__
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
#else
	/*
	 * As we use two segments for lowmem/highmem the offset within the
	 * segment is 0 on illumos.
	 */
	error = vm_mmap_memseg(ctx, gpa, segid, 0, len, PROT_ALL);
#endif
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}
 452 
/*
 * Allocate and map 'memsize' bytes of guest memory. Memory up to
 * ctx->lowmem_limit is placed at gpa 0 ("lowmem"); any remainder is placed
 * at 4GB ("highmem"). The whole guest-physical range is backed by one
 * contiguous host VA reservation with guard pages on both sides.
 * Only the VM_MMAP_ALL style is supported. Returns 0 on success.
 */
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		/* objsize spans up to the end of highmem, PCI hole included. */
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

#ifdef __FreeBSD__
	/* FreeBSD backs all guest memory with a single SYSMEM segment. */
	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);
#endif

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;

#ifdef __FreeBSD__
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}
#else
	/* illumos uses separate VM_HIGHMEM/VM_LOWMEM segments. */
	if (ctx->highmem > 0) {
		error = vm_alloc_memseg(ctx, VM_HIGHMEM, ctx->highmem, NULL);
		if (error)
			return (error);
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, VM_HIGHMEM, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		error = vm_alloc_memseg(ctx, VM_LOWMEM, ctx->lowmem, NULL);
		if (error)
			return (error);
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, VM_LOWMEM, gpa, len, baseaddr);
		if (error)
			return (error);
	}
#endif

	ctx->baseaddr = baseaddr;

	return (0);
}
 538 
 539 /*
 540  * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 541  * the lowmem or highmem regions.
 542  *
 543  * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
 544  * The instruction emulation code depends on this behavior.
 545  */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		/*
		 * The separate 'len <= lowmem' test keeps 'gaddr + len' from
		 * wrapping around before the final bound check.
		 */
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			/* Same overflow-guarding pattern as the lowmem case. */
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	/* [gaddr, gaddr+len) is not fully inside lowmem or highmem. */
	return (NULL);
}
 567 
/* Return the size in bytes of guest memory mapped below lowmem_limit. */
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}
 574 
/* Return the size in bytes of guest memory mapped at/above 4GB. */
size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}
 581 
 582 #ifndef __FreeBSD__
 583 int
 584 vm_get_devmem_offset(struct vmctx *ctx, int segid, off_t *mapoff)
 585 {
 586         struct vm_devmem_offset vdo;
 587         int error;
 588 
 589         vdo.segid = segid;
 590         error = ioctl(ctx->fd, VM_DEVMEM_GETOFFSET, &vdo);
 591         if (error == 0)
 592                 *mapoff = vdo.offset;
 593 
 594         return (error);
 595 }
 596 #endif
 597 
/*
 * Allocate a named device memory segment of 'len' bytes and map it into the
 * host address space (with guard regions on both sides). Returns a pointer
 * to the mapping, or MAP_FAILED with errno set on error.
 */
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
#ifdef	__FreeBSD__
	char pathname[MAXPATHLEN];
#endif
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;
	off_t mapoff;

	fd = -1;
	ptr = MAP_FAILED;
	/* devmem segments must be named (see cmpseg()). */
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

#ifdef	__FreeBSD__
	/* FreeBSD exposes devmem as its own node: /dev/vmm.io/<vm>.<name>. */
	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;
#else
	/* illumos maps devmem through the VM fd at a per-segment offset. */
	if (vm_get_devmem_offset(ctx, segid, &mapoff) != 0)
		goto done;
#endif

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

#ifdef	__FreeBSD__
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
#else
	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, ctx->fd,
	    mapoff);
#endif
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}
 661 
 662 int
 663 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
 664             uint64_t base, uint32_t limit, uint32_t access)
 665 {
 666         int error;
 667         struct vm_seg_desc vmsegdesc;
 668 
 669         bzero(&vmsegdesc, sizeof(vmsegdesc));
 670         vmsegdesc.cpuid = vcpu;
 671         vmsegdesc.regnum = reg;
 672         vmsegdesc.desc.base = base;
 673         vmsegdesc.desc.limit = limit;
 674         vmsegdesc.desc.access = access;
 675 
 676         error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
 677         return (error);
 678 }
 679 
 680 int
 681 vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
 682             uint64_t *base, uint32_t *limit, uint32_t *access)
 683 {
 684         int error;
 685         struct vm_seg_desc vmsegdesc;
 686 
 687         bzero(&vmsegdesc, sizeof(vmsegdesc));
 688         vmsegdesc.cpuid = vcpu;
 689         vmsegdesc.regnum = reg;
 690 
 691         error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
 692         if (error == 0) {
 693                 *base = vmsegdesc.desc.base;
 694                 *limit = vmsegdesc.desc.limit;
 695                 *access = vmsegdesc.desc.access;
 696         }
 697         return (error);
 698 }
 699 
 700 int
 701 vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
 702 {
 703         int error;
 704 
 705         error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
 706             &seg_desc->access);
 707         return (error);
 708 }
 709 
 710 int
 711 vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
 712 {
 713         int error;
 714         struct vm_register vmreg;
 715 
 716         bzero(&vmreg, sizeof(vmreg));
 717         vmreg.cpuid = vcpu;
 718         vmreg.regnum = reg;
 719         vmreg.regval = val;
 720 
 721         error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
 722         return (error);
 723 }
 724 
 725 int
 726 vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
 727 {
 728         int error;
 729         struct vm_register vmreg;
 730 
 731         bzero(&vmreg, sizeof(vmreg));
 732         vmreg.cpuid = vcpu;
 733         vmreg.regnum = reg;
 734 
 735         error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
 736         *ret_val = vmreg.regval;
 737         return (error);
 738 }
 739 
 740 int
 741 vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
 742     const int *regnums, uint64_t *regvals)
 743 {
 744         int error;
 745         struct vm_register_set vmregset;
 746 
 747         bzero(&vmregset, sizeof(vmregset));
 748         vmregset.cpuid = vcpu;
 749         vmregset.count = count;
 750         vmregset.regnums = regnums;
 751         vmregset.regvals = regvals;
 752 
 753         error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
 754         return (error);
 755 }
 756 
 757 int
 758 vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
 759     const int *regnums, uint64_t *regvals)
 760 {
 761         int error;
 762         struct vm_register_set vmregset;
 763 
 764         bzero(&vmregset, sizeof(vmregset));
 765         vmregset.cpuid = vcpu;
 766         vmregset.count = count;
 767         vmregset.regnums = regnums;
 768         vmregset.regvals = regvals;
 769 
 770         error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
 771         return (error);
 772 }
 773 
 774 int
 775 vm_run(struct vmctx *ctx, int vcpu, const struct vm_entry *vm_entry,
 776     struct vm_exit *vm_exit)
 777 {
 778         struct vm_entry entry;
 779 
 780         bcopy(vm_entry, &entry, sizeof (entry));
 781         entry.cpuid = vcpu;
 782         entry.exit_data = vm_exit;
 783 
 784         return (ioctl(ctx->fd, VM_RUN, &entry));
 785 }
 786 
 787 int
 788 vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
 789 {
 790         struct vm_suspend vmsuspend;
 791 
 792         bzero(&vmsuspend, sizeof(vmsuspend));
 793         vmsuspend.how = how;
 794         return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
 795 }
 796 
/* Reinitialize the VM to its power-on state. */
int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}
 803 
/*
 * Queue exception 'vector' for injection into 'vcpu'. 'errcode' is pushed
 * only when 'errcode_valid' is set; 'restart_instruction' requests that the
 * faulting instruction be re-executed after delivery.
 */
int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	/*
	 * NOTE(review): 'exc' is not zeroed first — assumes every member of
	 * struct vm_exception is assigned below; confirm against the header.
	 */
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}
 818 
#ifndef __FreeBSD__
/*
 * Inject fault 'vector' into 'vcpu' with restart semantics. Unlike
 * vm_inject_exception() this aborts (via assert) if the ioctl fails.
 */
void
vm_inject_fault(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    int errcode)
{
	int error;
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	/* Faults always restart the faulting instruction. */
	exc.restart_instruction = 1;
	error = ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc);

	assert(error == 0);
}
#endif /* __FreeBSD__ */
 837 
 838 int
 839 vm_apicid2vcpu(struct vmctx *ctx, int apicid)
 840 {
 841         /*
 842          * The apic id associated with the 'vcpu' has the same numerical value
 843          * as the 'vcpu' itself.
 844          */
 845         return (apicid);
 846 }
 847 
 848 int
 849 vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
 850 {
 851         struct vm_lapic_irq vmirq;
 852 
 853         bzero(&vmirq, sizeof(vmirq));
 854         vmirq.cpuid = vcpu;
 855         vmirq.vector = vector;
 856 
 857         return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
 858 }
 859 
 860 int
 861 vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
 862 {
 863         struct vm_lapic_irq vmirq;
 864 
 865         bzero(&vmirq, sizeof(vmirq));
 866         vmirq.cpuid = vcpu;
 867         vmirq.vector = vector;
 868 
 869         return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
 870 }
 871 
 872 int
 873 vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
 874 {
 875         struct vm_lapic_msi vmmsi;
 876 
 877         bzero(&vmmsi, sizeof(vmmsi));
 878         vmmsi.addr = addr;
 879         vmmsi.msg = msg;
 880 
 881         return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
 882 }
 883 
 884 int
 885 vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
 886 {
 887         struct vm_ioapic_irq ioapic_irq;
 888 
 889         bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
 890         ioapic_irq.irq = irq;
 891 
 892         return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
 893 }
 894 
 895 int
 896 vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
 897 {
 898         struct vm_ioapic_irq ioapic_irq;
 899 
 900         bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
 901         ioapic_irq.irq = irq;
 902 
 903         return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
 904 }
 905 
 906 int
 907 vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
 908 {
 909         struct vm_ioapic_irq ioapic_irq;
 910 
 911         bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
 912         ioapic_irq.irq = irq;
 913 
 914         return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
 915 }
 916 
/* Query the number of pins on the virtual ioapic into *pincount. */
int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
 923 
/*
 * Read or write a kernel-emulated device register at guest physical
 * address 'gpa' on behalf of 'vcpu'. 'size' is the access width in bytes
 * (must be a power of two; fls(size)-1 encodes it as log2). On a read,
 * *value receives the result; on a write, *value supplies the data.
 */
int
vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.vcpuid = vcpu,
		.access_width = fls(size) - 1,	/* bytes -> log2 encoding */
		.gpa = gpa,
		.value = write ? *value : ~0ul,	/* sentinel for reads */
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = ioctl(ctx->fd, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}
 942 
 943 int
 944 vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
 945 {
 946         struct vm_isa_irq isa_irq;
 947 
 948         bzero(&isa_irq, sizeof(struct vm_isa_irq));
 949         isa_irq.atpic_irq = atpic_irq;
 950         isa_irq.ioapic_irq = ioapic_irq;
 951 
 952         return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
 953 }
 954 
 955 int
 956 vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
 957 {
 958         struct vm_isa_irq isa_irq;
 959 
 960         bzero(&isa_irq, sizeof(struct vm_isa_irq));
 961         isa_irq.atpic_irq = atpic_irq;
 962         isa_irq.ioapic_irq = ioapic_irq;
 963 
 964         return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
 965 }
 966 
 967 int
 968 vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
 969 {
 970         struct vm_isa_irq isa_irq;
 971 
 972         bzero(&isa_irq, sizeof(struct vm_isa_irq));
 973         isa_irq.atpic_irq = atpic_irq;
 974         isa_irq.ioapic_irq = ioapic_irq;
 975 
 976         return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
 977 }
 978 
 979 int
 980 vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
 981     enum vm_intr_trigger trigger)
 982 {
 983         struct vm_isa_irq_trigger isa_irq_trigger;
 984 
 985         bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
 986         isa_irq_trigger.atpic_irq = atpic_irq;
 987         isa_irq_trigger.trigger = trigger;
 988 
 989         return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
 990 }
 991 
 992 int
 993 vm_inject_nmi(struct vmctx *ctx, int vcpu)
 994 {
 995         struct vm_nmi vmnmi;
 996 
 997         bzero(&vmnmi, sizeof(vmnmi));
 998         vmnmi.cpuid = vcpu;
 999 
1000         return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
1001 }
1002 
/*
 * Capability type -> name table, indexed by enum vm_cap_type.
 * NOTE: designated initializers make this a sparse array — indices that
 * are skipped (e.g. VM_CAP_UNRESTRICTED_GUEST on non-FreeBSD builds)
 * are NULL entries, so consumers must tolerate NULL slots.
 */
static const char *capstrmap[] = {
	[VM_CAP_HALT_EXIT]  = "hlt_exit",
	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
	[VM_CAP_PAUSE_EXIT] = "pause_exit",
#ifdef __FreeBSD__
	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
#endif
	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
	[VM_CAP_BPT_EXIT] = "bpt_exit",
};
1013 
1014 int
1015 vm_capability_name2type(const char *capname)
1016 {
1017         int i;
1018 
1019         for (i = 0; i < nitems(capstrmap); i++) {
1020                 if (strcmp(capstrmap[i], capname) == 0)
1021                         return (i);
1022         }
1023 
1024         return (-1);
1025 }
1026 
1027 const char *
1028 vm_capability_type2name(int type)
1029 {
1030         if (type >= 0 && type < nitems(capstrmap))
1031                 return (capstrmap[type]);
1032 
1033         return (NULL);
1034 }
1035 
1036 int
1037 vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
1038                   int *retval)
1039 {
1040         int error;
1041         struct vm_capability vmcap;
1042 
1043         bzero(&vmcap, sizeof(vmcap));
1044         vmcap.cpuid = vcpu;
1045         vmcap.captype = cap;
1046 
1047         error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
1048         *retval = vmcap.capval;
1049         return (error);
1050 }
1051 
1052 int
1053 vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
1054 {
1055         struct vm_capability vmcap;
1056 
1057         bzero(&vmcap, sizeof(vmcap));
1058         vmcap.cpuid = vcpu;
1059         vmcap.captype = cap;
1060         vmcap.capval = val;
1061 
1062         return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
1063 }
1064 
1065 #ifdef __FreeBSD__
1066 int
1067 vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
1068 {
1069         struct vm_pptdev pptdev;
1070 
1071         bzero(&pptdev, sizeof(pptdev));
1072         pptdev.bus = bus;
1073         pptdev.slot = slot;
1074         pptdev.func = func;
1075 
1076         return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
1077 }
1078 
1079 int
1080 vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
1081 {
1082         struct vm_pptdev pptdev;
1083 
1084         bzero(&pptdev, sizeof(pptdev));
1085         pptdev.bus = bus;
1086         pptdev.slot = slot;
1087         pptdev.func = func;
1088 
1089         return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
1090 }
1091 
1092 int
1093 vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
1094                    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
1095 {
1096         struct vm_pptdev_mmio pptmmio;
1097 
1098         bzero(&pptmmio, sizeof(pptmmio));
1099         pptmmio.bus = bus;
1100         pptmmio.slot = slot;
1101         pptmmio.func = func;
1102         pptmmio.gpa = gpa;
1103         pptmmio.len = len;
1104         pptmmio.hpa = hpa;
1105 
1106         return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1107 }
1108 
1109 int
1110 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1111     uint64_t addr, uint64_t msg, int numvec)
1112 {
1113         struct vm_pptdev_msi pptmsi;
1114 
1115         bzero(&pptmsi, sizeof(pptmsi));
1116         pptmsi.vcpu = vcpu;
1117         pptmsi.bus = bus;
1118         pptmsi.slot = slot;
1119         pptmsi.func = func;
1120         pptmsi.msg = msg;
1121         pptmsi.addr = addr;
1122         pptmsi.numvec = numvec;
1123 
1124         return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1125 }
1126 
1127 int
1128 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
1129     int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
1130 {
1131         struct vm_pptdev_msix pptmsix;
1132 
1133         bzero(&pptmsix, sizeof(pptmsix));
1134         pptmsix.vcpu = vcpu;
1135         pptmsix.bus = bus;
1136         pptmsix.slot = slot;
1137         pptmsix.func = func;
1138         pptmsix.idx = idx;
1139         pptmsix.msg = msg;
1140         pptmsix.addr = addr;
1141         pptmsix.vector_control = vector_control;
1142 
1143         return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
1144 }
1145 
1146 int
1147 vm_get_pptdev_limits(struct vmctx *ctx, int bus, int slot, int func,
1148     int *msi_limit, int *msix_limit)
1149 {
1150         struct vm_pptdev_limits pptlimits;
1151         int error;
1152 
1153         bzero(&pptlimits, sizeof (pptlimits));
1154         pptlimits.bus = bus;
1155         pptlimits.slot = slot;
1156         pptlimits.func = func;
1157 
1158         error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);
1159 
1160         *msi_limit = pptlimits.msi_limit;
1161         *msix_limit = pptlimits.msix_limit;
1162 
1163         return (error);
1164 }
1165 #else /* __FreeBSD__ */
1166 int
1167 vm_assign_pptdev(struct vmctx *ctx, int pptfd)
1168 {
1169         struct vm_pptdev pptdev;
1170 
1171         pptdev.pptfd = pptfd;
1172         return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
1173 }
1174 
1175 int
1176 vm_unassign_pptdev(struct vmctx *ctx, int pptfd)
1177 {
1178         struct vm_pptdev pptdev;
1179 
1180         pptdev.pptfd = pptfd;
1181         return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
1182 }
1183 
1184 int
1185 vm_map_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa, size_t len,
1186     vm_paddr_t hpa)
1187 {
1188         struct vm_pptdev_mmio pptmmio;
1189 
1190         pptmmio.pptfd = pptfd;
1191         pptmmio.gpa = gpa;
1192         pptmmio.len = len;
1193         pptmmio.hpa = hpa;
1194         return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
1195 }
1196 
1197 int
1198 vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int pptfd, uint64_t addr,
1199     uint64_t msg, int numvec)
1200 {
1201         struct vm_pptdev_msi pptmsi;
1202 
1203         pptmsi.vcpu = vcpu;
1204         pptmsi.pptfd = pptfd;
1205         pptmsi.msg = msg;
1206         pptmsi.addr = addr;
1207         pptmsi.numvec = numvec;
1208         return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
1209 }
1210 
1211 int
1212 vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int pptfd, int idx,
1213     uint64_t addr, uint64_t msg, uint32_t vector_control)
1214 {
1215         struct vm_pptdev_msix pptmsix;
1216 
1217         pptmsix.vcpu = vcpu;
1218         pptmsix.pptfd = pptfd;
1219         pptmsix.idx = idx;
1220         pptmsix.msg = msg;
1221         pptmsix.addr = addr;
1222         pptmsix.vector_control = vector_control;
1223         return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
1224 }
1225 
1226 int
1227 vm_get_pptdev_limits(struct vmctx *ctx, int pptfd, int *msi_limit,
1228     int *msix_limit)
1229 {
1230         struct vm_pptdev_limits pptlimits;
1231         int error;
1232 
1233         bzero(&pptlimits, sizeof (pptlimits));
1234         pptlimits.pptfd = pptfd;
1235         error = ioctl(ctx->fd, VM_GET_PPTDEV_LIMITS, &pptlimits);
1236 
1237         *msi_limit = pptlimits.msi_limit;
1238         *msix_limit = pptlimits.msix_limit;
1239         return (error);
1240 }
1241 #endif /* __FreeBSD__ */
1242 
1243 uint64_t *
1244 vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
1245              int *ret_entries)
1246 {
1247         int error;
1248 
1249         static struct vm_stats vmstats;
1250 
1251         vmstats.cpuid = vcpu;
1252 
1253         error = ioctl(ctx->fd, VM_STATS_IOC, &vmstats);
1254         if (error == 0) {
1255                 if (ret_entries)
1256                         *ret_entries = vmstats.num_entries;
1257                 if (ret_tv)
1258                         *ret_tv = vmstats.tv;
1259                 return (vmstats.statbuf);
1260         } else
1261                 return (NULL);
1262 }
1263 
1264 const char *
1265 vm_get_stat_desc(struct vmctx *ctx, int index)
1266 {
1267         static struct vm_stat_desc statdesc;
1268 
1269         statdesc.index = index;
1270         if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
1271                 return (statdesc.desc);
1272         else
1273                 return (NULL);
1274 }
1275 
1276 int
1277 vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
1278 {
1279         int error;
1280         struct vm_x2apic x2apic;
1281 
1282         bzero(&x2apic, sizeof(x2apic));
1283         x2apic.cpuid = vcpu;
1284 
1285         error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
1286         *state = x2apic.state;
1287         return (error);
1288 }
1289 
1290 int
1291 vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
1292 {
1293         int error;
1294         struct vm_x2apic x2apic;
1295 
1296         bzero(&x2apic, sizeof(x2apic));
1297         x2apic.cpuid = vcpu;
1298         x2apic.state = state;
1299 
1300         error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
1301 
1302         return (error);
1303 }
1304 
1305 #ifndef __FreeBSD__
1306 int
1307 vcpu_reset(struct vmctx *vmctx, int vcpu)
1308 {
1309         struct vm_vcpu_reset vvr;
1310 
1311         vvr.vcpuid = vcpu;
1312         vvr.kind = VRK_RESET;
1313 
1314         return (ioctl(vmctx->fd, VM_RESET_CPU, &vvr));
1315 }
1316 #else /* __FreeBSD__ */
1317 /*
1318  * From Intel Vol 3a:
1319  * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
1320  */
1321 int
1322 vcpu_reset(struct vmctx *vmctx, int vcpu)
1323 {
1324         int error;
1325         uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
1326         uint32_t desc_access, desc_limit;
1327         uint16_t sel;
1328 
1329         zero = 0;
1330 
1331         rflags = 0x2;
1332         error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
1333         if (error)
1334                 goto done;
1335 
1336         rip = 0xfff0;
1337         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
1338                 goto done;
1339 
1340         cr0 = CR0_NE;
1341         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
1342                 goto done;
1343 
1344         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
1345                 goto done;
1346         
1347         cr4 = 0;
1348         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
1349                 goto done;
1350 
1351         /*
1352          * CS: present, r/w, accessed, 16-bit, byte granularity, usable
1353          */
1354         desc_base = 0xffff0000;
1355         desc_limit = 0xffff;
1356         desc_access = 0x0093;
1357         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
1358                             desc_base, desc_limit, desc_access);
1359         if (error)
1360                 goto done;
1361 
1362         sel = 0xf000;
1363         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
1364                 goto done;
1365 
1366         /*
1367          * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
1368          */
1369         desc_base = 0;
1370         desc_limit = 0xffff;
1371         desc_access = 0x0093;
1372         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
1373                             desc_base, desc_limit, desc_access);
1374         if (error)
1375                 goto done;
1376 
1377         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
1378                             desc_base, desc_limit, desc_access);
1379         if (error)
1380                 goto done;
1381 
1382         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
1383                             desc_base, desc_limit, desc_access);
1384         if (error)
1385                 goto done;
1386 
1387         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
1388                             desc_base, desc_limit, desc_access);
1389         if (error)
1390                 goto done;
1391 
1392         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
1393                             desc_base, desc_limit, desc_access);
1394         if (error)
1395                 goto done;
1396 
1397         sel = 0;
1398         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
1399                 goto done;
1400         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
1401                 goto done;
1402         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
1403                 goto done;
1404         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
1405                 goto done;
1406         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
1407                 goto done;
1408 
1409         /* General purpose registers */
1410         rdx = 0xf00;
1411         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
1412                 goto done;
1413         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
1414                 goto done;
1415         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
1416                 goto done;
1417         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
1418                 goto done;
1419         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
1420                 goto done;
1421         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
1422                 goto done;
1423         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
1424                 goto done;
1425         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
1426                 goto done;
1427 
1428         /* GDTR, IDTR */
1429         desc_base = 0;
1430         desc_limit = 0xffff;
1431         desc_access = 0;
1432         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
1433                             desc_base, desc_limit, desc_access);
1434         if (error != 0)
1435                 goto done;
1436 
1437         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
1438                             desc_base, desc_limit, desc_access);
1439         if (error != 0)
1440                 goto done;
1441 
1442         /* TR */
1443         desc_base = 0;
1444         desc_limit = 0xffff;
1445         desc_access = 0x0000008b;
1446         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
1447         if (error)
1448                 goto done;
1449 
1450         sel = 0;
1451         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
1452                 goto done;
1453 
1454         /* LDTR */
1455         desc_base = 0;
1456         desc_limit = 0xffff;
1457         desc_access = 0x00000082;
1458         error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
1459                             desc_limit, desc_access);
1460         if (error)
1461                 goto done;
1462 
1463         sel = 0;
1464         if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
1465                 goto done;
1466 
1467         /* XXX cr2, debug registers */
1468 
1469         error = 0;
1470 done:
1471         return (error);
1472 }
1473 #endif /* __FreeBSD__ */
1474 
1475 int
1476 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
1477 {
1478         int error, i;
1479         struct vm_gpa_pte gpapte;
1480 
1481         bzero(&gpapte, sizeof(gpapte));
1482         gpapte.gpa = gpa;
1483 
1484         error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);
1485 
1486         if (error == 0) {
1487                 *num = gpapte.ptenum;
1488                 for (i = 0; i < gpapte.ptenum; i++)
1489                         pte[i] = gpapte.pte[i];
1490         }
1491 
1492         return (error);
1493 }
1494 
1495 int
1496 vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
1497 {
1498         int error;
1499         struct vm_hpet_cap cap;
1500 
1501         bzero(&cap, sizeof(struct vm_hpet_cap));
1502         error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
1503         if (capabilities != NULL)
1504                 *capabilities = cap.capabilities;
1505         return (error);
1506 }
1507 
1508 int
1509 vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1510     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1511 {
1512         struct vm_gla2gpa gg;
1513         int error;
1514 
1515         bzero(&gg, sizeof(struct vm_gla2gpa));
1516         gg.vcpuid = vcpu;
1517         gg.prot = prot;
1518         gg.gla = gla;
1519         gg.paging = *paging;
1520 
1521         error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
1522         if (error == 0) {
1523                 *fault = gg.fault;
1524                 *gpa = gg.gpa;
1525         }
1526         return (error);
1527 }
1528 
1529 int
1530 vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1531     uint64_t gla, int prot, uint64_t *gpa, int *fault)
1532 {
1533         struct vm_gla2gpa gg;
1534         int error;
1535 
1536         bzero(&gg, sizeof(struct vm_gla2gpa));
1537         gg.vcpuid = vcpu;
1538         gg.prot = prot;
1539         gg.gla = gla;
1540         gg.paging = *paging;
1541 
1542         error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
1543         if (error == 0) {
1544                 *fault = gg.fault;
1545                 *gpa = gg.gpa;
1546         }
1547         return (error);
1548 }
1549 
#ifndef min
/* Classic min() macro: beware that both arguments are evaluated twice. */
#define min(a,b)        (((a) < (b)) ? (a) : (b))
#endif
1553 
1554 int
1555 vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
1556     uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
1557     int *fault)
1558 {
1559         void *va;
1560         uint64_t gpa;
1561         int error, i, n, off;
1562 
1563         for (i = 0; i < iovcnt; i++) {
1564                 iov[i].iov_base = 0;
1565                 iov[i].iov_len = 0;
1566         }
1567 
1568         while (len) {
1569                 assert(iovcnt > 0);
1570                 error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
1571                 if (error || *fault)
1572                         return (error);
1573 
1574                 off = gpa & PAGE_MASK;
1575                 n = min(len, PAGE_SIZE - off);
1576 
1577                 va = vm_map_gpa(ctx, gpa, n);
1578                 if (va == NULL)
1579                         return (EFAULT);
1580 
1581                 iov->iov_base = va;
1582                 iov->iov_len = n;
1583                 iov++;
1584                 iovcnt--;
1585 
1586                 gla += n;
1587                 len -= n;
1588         }
1589         return (0);
1590 }
1591 
/*
 * No-op: mappings produced by vm_copy_setup() point into the long-lived
 * guest memory mapping and need no per-copy teardown.
 */
void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{
}
1598 
/*
 * Gather 'len' bytes from the guest mappings described by 'iov' into the
 * caller's buffer 'vp'.  Asserts if the iovec is exhausted early.
 */
void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	char *out = vp;
	size_t chunk;

	while (len != 0) {
		assert(iov->iov_len);
		chunk = min(len, iov->iov_len);
		bcopy(iov->iov_base, out, chunk);
		iov++;
		out += chunk;
		len -= chunk;
	}
}
1618 
/*
 * Scatter 'len' bytes from the caller's buffer 'vp' into the guest
 * mappings described by 'iov'.  Asserts if the iovec is exhausted early.
 */
void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *in = vp;
	size_t chunk;

	while (len != 0) {
		assert(iov->iov_len);
		chunk = min(len, iov->iov_len);
		bcopy(in, iov->iov_base, chunk);
		iov++;
		in += chunk;
		len -= chunk;
	}
}
1639 
1640 static int
1641 vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
1642 {
1643         struct vm_cpuset vm_cpuset;
1644         int error;
1645 
1646         bzero(&vm_cpuset, sizeof(struct vm_cpuset));
1647         vm_cpuset.which = which;
1648         vm_cpuset.cpusetsize = sizeof(cpuset_t);
1649         vm_cpuset.cpus = cpus;
1650 
1651         error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
1652         return (error);
1653 }
1654 
1655 int
1656 vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
1657 {
1658 
1659         return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
1660 }
1661 
1662 int
1663 vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
1664 {
1665 
1666         return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
1667 }
1668 
1669 int
1670 vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
1671 {
1672 
1673         return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
1674 }
1675 
1676 int
1677 vm_activate_cpu(struct vmctx *ctx, int vcpu)
1678 {
1679         struct vm_activate_cpu ac;
1680         int error;
1681 
1682         bzero(&ac, sizeof(struct vm_activate_cpu));
1683         ac.vcpuid = vcpu;
1684         error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
1685         return (error);
1686 }
1687 
1688 int
1689 vm_suspend_cpu(struct vmctx *ctx, int vcpu)
1690 {
1691         struct vm_activate_cpu ac;
1692         int error;
1693 
1694         bzero(&ac, sizeof(struct vm_activate_cpu));
1695         ac.vcpuid = vcpu;
1696         error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
1697         return (error);
1698 }
1699 
1700 int
1701 vm_resume_cpu(struct vmctx *ctx, int vcpu)
1702 {
1703         struct vm_activate_cpu ac;
1704         int error;
1705 
1706         bzero(&ac, sizeof(struct vm_activate_cpu));
1707         ac.vcpuid = vcpu;
1708         error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
1709         return (error);
1710 }
1711 
1712 int
1713 vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
1714 {
1715         struct vm_intinfo vmii;
1716         int error;
1717 
1718         bzero(&vmii, sizeof(struct vm_intinfo));
1719         vmii.vcpuid = vcpu;
1720         error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
1721         if (error == 0) {
1722                 *info1 = vmii.info1;
1723                 *info2 = vmii.info2;
1724         }
1725         return (error);
1726 }
1727 
1728 int
1729 vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
1730 {
1731         struct vm_intinfo vmii;
1732         int error;
1733 
1734         bzero(&vmii, sizeof(struct vm_intinfo));
1735         vmii.vcpuid = vcpu;
1736         vmii.info1 = info1;
1737         error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
1738         return (error);
1739 }
1740 
1741 int
1742 vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
1743 {
1744         struct vm_rtc_data rtcdata;
1745         int error;
1746 
1747         bzero(&rtcdata, sizeof(struct vm_rtc_data));
1748         rtcdata.offset = offset;
1749         rtcdata.value = value;
1750         error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
1751         return (error);
1752 }
1753 
1754 int
1755 vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
1756 {
1757         struct vm_rtc_data rtcdata;
1758         int error;
1759 
1760         bzero(&rtcdata, sizeof(struct vm_rtc_data));
1761         rtcdata.offset = offset;
1762         error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
1763         if (error == 0)
1764                 *retval = rtcdata.value;
1765         return (error);
1766 }
1767 
1768 int
1769 vm_rtc_settime(struct vmctx *ctx, time_t secs)
1770 {
1771         struct vm_rtc_time rtctime;
1772         int error;
1773 
1774         bzero(&rtctime, sizeof(struct vm_rtc_time));
1775         rtctime.secs = secs;
1776         error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
1777         return (error);
1778 }
1779 
1780 int
1781 vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
1782 {
1783         struct vm_rtc_time rtctime;
1784         int error;
1785 
1786         bzero(&rtctime, sizeof(struct vm_rtc_time));
1787         error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
1788         if (error == 0)
1789                 *secs = rtctime.secs;
1790         return (error);
1791 }
1792 
1793 int
1794 vm_restart_instruction(void *arg, int vcpu)
1795 {
1796         struct vmctx *ctx = arg;
1797 
1798         return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
1799 }
1800 
1801 int
1802 vm_set_topology(struct vmctx *ctx,
1803     uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
1804 {
1805         struct vm_cpu_topology topology;
1806 
1807         bzero(&topology, sizeof (struct vm_cpu_topology));
1808         topology.sockets = sockets;
1809         topology.cores = cores;
1810         topology.threads = threads;
1811         topology.maxcpus = maxcpus;
1812         return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
1813 }
1814 
1815 int
1816 vm_get_topology(struct vmctx *ctx,
1817     uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
1818 {
1819         struct vm_cpu_topology topology;
1820         int error;
1821 
1822         bzero(&topology, sizeof (struct vm_cpu_topology));
1823         error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
1824         if (error == 0) {
1825                 *sockets = topology.sockets;
1826                 *cores = topology.cores;
1827                 *threads = topology.threads;
1828                 *maxcpus = topology.maxcpus;
1829         }
1830         return (error);
1831 }
1832 
1833 int
1834 vm_get_device_fd(struct vmctx *ctx)
1835 {
1836 
1837         return (ctx->fd);
1838 }
1839 
1840 #ifndef __FreeBSD__
1841 int
1842 vm_pmtmr_set_location(struct vmctx *ctx, uint16_t ioport)
1843 {
1844         return (ioctl(ctx->fd, VM_PMTMR_LOCATE, ioport));
1845 }
1846 
1847 int
1848 vm_wrlock_cycle(struct vmctx *ctx)
1849 {
1850         if (ioctl(ctx->fd, VM_WRLOCK_CYCLE, 0) != 0) {
1851                 return (errno);
1852         }
1853         return (0);
1854 }
1855 
1856 int
1857 vm_get_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state *state,
1858     uint8_t *sipi_vector)
1859 {
1860         struct vm_run_state data;
1861 
1862         data.vcpuid = vcpu;
1863         if (ioctl(ctx->fd, VM_GET_RUN_STATE, &data) != 0) {
1864                 return (errno);
1865         }
1866 
1867         *state = data.state;
1868         *sipi_vector = data.sipi_vector;
1869         return (0);
1870 }
1871 
1872 int
1873 vm_set_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state state,
1874     uint8_t sipi_vector)
1875 {
1876         struct vm_run_state data;
1877 
1878         data.vcpuid = vcpu;
1879         data.state = state;
1880         data.sipi_vector = sipi_vector;
1881         if (ioctl(ctx->fd, VM_SET_RUN_STATE, &data) != 0) {
1882                 return (errno);
1883         }
1884 
1885         return (0);
1886 }
1887 
1888 #endif /* __FreeBSD__ */
1889 
1890 #ifdef __FreeBSD__
1891 const cap_ioctl_t *
1892 vm_get_ioctls(size_t *len)
1893 {
1894         cap_ioctl_t *cmds;
1895         /* keep in sync with machine/vmm_dev.h */
1896         static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
1897             VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
1898             VM_MMAP_GETNEXT, VM_SET_REGISTER, VM_GET_REGISTER,
1899             VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
1900             VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
1901             VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
1902             VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
1903             VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
1904             VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
1905             VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
1906             VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
1907             VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
1908             VM_PPTDEV_MSIX, VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
1909             VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
1910             VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
1911             VM_GLA2GPA_NOFAULT,
1912             VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
1913             VM_SET_INTINFO, VM_GET_INTINFO,
1914             VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
1915             VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };
1916 
1917         if (len == NULL) {
1918                 cmds = malloc(sizeof(vm_ioctl_cmds));
1919                 if (cmds == NULL)
1920                         return (NULL);
1921                 bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
1922                 return (cmds);
1923         }
1924 
1925         *len = nitems(vm_ioctl_cmds);
1926         return (NULL);
1927 }
1928 #endif /* __FreeBSD__ */