Revert "OS-8005 bhyve memory pressure needs to target ARC better (#354)"
This reverts commit a6033573eedd94118d2b9e65f45deca0bf4b42f7.
--- old/usr/src/lib/libvmmapi/common/vmmapi.h
+++ new/usr/src/lib/libvmmapi/common/vmmapi.h
1 1 /*-
2 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 3 *
4 4 * Copyright (c) 2011 NetApp, Inc.
5 5 * All rights reserved.
6 6 *
7 7 * Redistribution and use in source and binary forms, with or without
8 8 * modification, are permitted provided that the following conditions
9 9 * are met:
10 10 * 1. Redistributions of source code must retain the above copyright
11 11 * notice, this list of conditions and the following disclaimer.
12 12 * 2. Redistributions in binary form must reproduce the above copyright
13 13 * notice, this list of conditions and the following disclaimer in the
14 14 * documentation and/or other materials provided with the distribution.
15 15 *
16 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 26 * SUCH DAMAGE.
27 27 *
28 28 * $FreeBSD$
29 29 */
30 30 /*
31 31 * This file and its contents are supplied under the terms of the
32 32 * Common Development and Distribution License ("CDDL"), version 1.0.
33 33 * You may only use this file in accordance with the terms of version
34 34 * 1.0 of the CDDL.
35 35 *
36 36 * A full copy of the text of the CDDL should have accompanied this
37 37 * source. A copy of the CDDL is also available via the Internet at
38 38 * http://www.illumos.org/license/CDDL.
39 39 *
40 40 * Copyright 2015 Pluribus Networks Inc.
41 - * Copyright 2020 Joyent, Inc.
41 + * Copyright 2019 Joyent, Inc.
42 42 * Copyright 2020 Oxide Computer Company
43 43 */
44 44
45 45 #ifndef _VMMAPI_H_
46 46 #define _VMMAPI_H_
47 47
48 48 #include <sys/param.h>
49 49 #include <sys/cpuset.h>
50 50 #include <x86/segments.h>
51 51
52 52 #include <stdbool.h>
53 53
54 54 /*
55 55 * API version for out-of-tree consumers like grub-bhyve for making compile
56 56 * time decisions.
57 57 */
58 58 #define VMMAPI_VERSION 0103 /* 2 digit major followed by 2 digit minor */
59 59
60 60 struct iovec;
61 61 struct vmctx;
62 62 enum x2apic_state;
63 63
64 64 /*
65 65 * Different styles of mapping the memory assigned to a VM into the address
66 66 * space of the controlling process.
67 67 */
68 68 enum vm_mmap_style {
69 69 VM_MMAP_NONE, /* no mapping */
70 70 VM_MMAP_ALL, /* fully and statically mapped */
71 71 VM_MMAP_SPARSE, /* mappings created on-demand */
72 72 };
73 73
74 74 /*
75 75 * 'flags' value passed to 'vm_set_memflags()'.
76 76 */
77 77 #define VM_MEM_F_INCORE 0x01 /* include guest memory in core file */
78 78 #define VM_MEM_F_WIRED 0x02 /* guest memory is wired */
79 79
80 80 /*
81 81 * Identifiers for memory segments:
82 82 * - vm_setup_memory() uses VM_SYSMEM for the system memory segment.
83 83 * - the remaining identifiers can be used to create devmem segments.
84 84 */
85 85 enum {
86 86 #ifdef __FreeBSD__
87 87 VM_SYSMEM,
88 88 #else
89 89 VM_LOWMEM,
90 90 VM_HIGHMEM,
91 91 #endif
92 92 VM_BOOTROM,
93 93 VM_FRAMEBUFFER,
94 94 };
95 95
96 96 /*
97 97 * Get the length and name of the memory segment identified by 'segid'.
98 98 * Note that system memory segments are identified with a nul name.
99 99 *
100 100 * Returns 0 on success and non-zero otherwise.
101 101 */
102 102 int vm_get_memseg(struct vmctx *ctx, int ident, size_t *lenp, char *name,
103 103 size_t namesiz);
104 104
105 105 /*
106 106 * Iterate over the guest address space. This function finds an address range
107 107 * that starts at an address >= *gpa.
108 108 *
109 109 * Returns 0 if the next address range was found and non-zero otherwise.
110 110 */
111 111 int vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
112 112 vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
113 113 /*
114 114 * Create a device memory segment identified by 'segid'.
115 115 *
116 116 * Returns a pointer to the memory segment on success and MAP_FAILED otherwise.
117 117 */
118 118 void *vm_create_devmem(struct vmctx *ctx, int segid, const char *name,
119 119 size_t len);
120 120
121 121 #ifndef __FreeBSD__
122 122 /*
123 123 * Return the map offset for the device memory segment 'segid'.
124 124 */
125 125 int vm_get_devmem_offset(struct vmctx *ctx, int segid, off_t *mapoff);
126 126 #endif
127 127
128 128 /*
129 129 * Map the memory segment identified by 'segid' into the guest address space
130 130 * at [gpa,gpa+len) with protection 'prot'.
131 131 */
132 132 int vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid,
133 133 vm_ooffset_t segoff, size_t len, int prot);
134 134
135 135 int vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len);
136 136
137 137 int vm_create(const char *name);
138 138 int vm_get_device_fd(struct vmctx *ctx);
139 139 struct vmctx *vm_open(const char *name);
140 140 #ifndef __FreeBSD__
141 141 void vm_close(struct vmctx *ctx);
142 142 #endif
143 143 void vm_destroy(struct vmctx *ctx);
144 144 int vm_parse_memsize(const char *optarg, size_t *memsize);
145 145 int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
146 146 void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
147 147 int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
148 148 int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging,
149 149 uint64_t gla, int prot, uint64_t *gpa, int *fault);
150 150 int vm_gla2gpa_nofault(struct vmctx *, int vcpuid,
151 151 struct vm_guest_paging *paging, uint64_t gla, int prot,
152 152 uint64_t *gpa, int *fault);
153 153 uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
154 154 void vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
155 155 void vm_set_memflags(struct vmctx *ctx, int flags);
156 156 int vm_get_memflags(struct vmctx *ctx);
157 157 size_t vm_get_lowmem_size(struct vmctx *ctx);
158 158 size_t vm_get_highmem_size(struct vmctx *ctx);
159 159 int vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
160 160 uint64_t base, uint32_t limit, uint32_t access);
161 161 int vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
162 162 uint64_t *base, uint32_t *limit, uint32_t *access);
163 163 int vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg,
164 164 struct seg_desc *seg_desc);
165 165 int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val);
166 166 int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval);
167 167 int vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
168 168 const int *regnums, uint64_t *regvals);
169 169 int vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
170 170 const int *regnums, uint64_t *regvals);
171 171 int vm_run(struct vmctx *ctx, int vcpu, const struct vm_entry *vm_entry,
172 172 struct vm_exit *vm_exit);
173 173 int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
174 174 int vm_reinit(struct vmctx *ctx);
175 175 int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
176 176 int vm_inject_exception(struct vmctx *ctx, int vcpu, int vector,
177 177 int errcode_valid, uint32_t errcode, int restart_instruction);
178 178 #ifndef __FreeBSD__
179 179 void vm_inject_fault(struct vmctx *ctx, int vcpu, int vector,
180 180 int errcode_valid, int errcode);
181 181
182 182 static __inline void
183 183 vm_inject_gp(struct vmctx *ctx, int vcpuid)
184 184 {
185 185 vm_inject_fault(ctx, vcpuid, IDT_GP, 1, 0);
186 186 }
187 187
188 188 static __inline void
189 189 vm_inject_ac(struct vmctx *ctx, int vcpuid, int errcode)
190 190 {
191 191 vm_inject_fault(ctx, vcpuid, IDT_AC, 1, errcode);
192 192 }
193 193 static __inline void
194 194 vm_inject_ss(struct vmctx *ctx, int vcpuid, int errcode)
195 195 {
196 196 vm_inject_fault(ctx, vcpuid, IDT_SS, 1, errcode);
197 197 }
198 198 #endif
199 199 int vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector);
200 200 int vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector);
201 201 int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
202 202 int vm_ioapic_assert_irq(struct vmctx *ctx, int irq);
203 203 int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
204 204 int vm_ioapic_pulse_irq(struct vmctx *ctx, int irq);
205 205 int vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
206 206 int vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu,
207 207 vm_paddr_t gpa, bool write, int size, uint64_t *value);
208 208 int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
209 209 int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
210 210 int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
211 211 int vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
212 212 enum vm_intr_trigger trigger);
213 213 int vm_inject_nmi(struct vmctx *ctx, int vcpu);
214 214 int vm_capability_name2type(const char *capname);
215 215 const char *vm_capability_type2name(int type);
216 216 int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
217 217 int *retval);
218 218 int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
219 219 int val);
220 220 #ifdef __FreeBSD__
221 221 int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
222 222 int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
223 223 int vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
224 224 vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
225 225 int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
226 226 vm_paddr_t gpa, size_t len);
227 227 int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot,
228 228 int func, uint64_t addr, uint64_t msg, int numvec);
229 229 int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot,
230 230 int func, int idx, uint64_t addr, uint64_t msg,
231 231 uint32_t vector_control);
232 232 int vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func);
233 233 int vm_get_pptdev_limits(struct vmctx *ctx, int bus, int slot, int func,
234 234 int *msi_limit, int *msix_limit);
235 235 #else /* __FreeBSD__ */
236 236 int vm_assign_pptdev(struct vmctx *ctx, int pptfd);
237 237 int vm_unassign_pptdev(struct vmctx *ctx, int pptfd);
238 238 int vm_map_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa,
239 239 size_t len, vm_paddr_t hpa);
240 240 int vm_unmap_pptdev_mmio(struct vmctx *ctx, int pptfd, vm_paddr_t gpa,
241 241 size_t len);
242 242 int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int pptfd,
243 243 uint64_t addr, uint64_t msg, int numvec);
244 244 int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int pptfd,
245 245 int idx, uint64_t addr, uint64_t msg, uint32_t vector_control);
246 246 int vm_disable_pptdev_msix(struct vmctx *ctx, int pptfd);
247 247 int vm_get_pptdev_limits(struct vmctx *ctx, int pptfd, int *msi_limit,
248 248 int *msix_limit);
249 249 #endif /* __FreeBSD__ */
250 250
251 251 int vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *i1, uint64_t *i2);
252 252 int vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t exit_intinfo);
253 253
254 254 #ifdef __FreeBSD__
255 255 const cap_ioctl_t *vm_get_ioctls(size_t *len);
256 256 #endif
257 257
258 258 /*
259 259 * Return a pointer to the statistics buffer. Note that this is not MT-safe.
260 260 */
261 261 uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
262 262 int *ret_entries);
263 263 const char *vm_get_stat_desc(struct vmctx *ctx, int index);
264 264
265 265 int vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *s);
266 266 int vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s);
267 267
268 268 int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
269 269
270 270 /*
271 271 * Translate the GLA range [gla,gla+len) into GPA segments in 'iov'.
272 272 * The 'iovcnt' should be big enough to accommodate all GPA segments.
273 273 *
274 274 * retval fault Interpretation
275 275 * 0 0 Success
276 276 * 0 1 An exception was injected into the guest
277 277 * EFAULT N/A Error
278 278 */
279 279 int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg,
280 280 uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
281 281 int *fault);
282 282 void vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *guest_iov,
283 283 void *host_dst, size_t len);
284 284 void vm_copyout(struct vmctx *ctx, int vcpu, const void *host_src,
285 285 struct iovec *guest_iov, size_t len);
286 286 void vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov,
287 287 int iovcnt);
288 288
289 289 /* RTC */
290 290 int vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value);
291 291 int vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval);
292 292 int vm_rtc_settime(struct vmctx *ctx, time_t secs);
293 293 int vm_rtc_gettime(struct vmctx *ctx, time_t *secs);
294 294
295 295 /* Reset vcpu register state */
296 296 int vcpu_reset(struct vmctx *ctx, int vcpu);
297 297
298 298 int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus);
299 299 int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus);
300 300 int vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus);
301 301 int vm_activate_cpu(struct vmctx *ctx, int vcpu);
302 302 int vm_suspend_cpu(struct vmctx *ctx, int vcpu);
303 303 int vm_resume_cpu(struct vmctx *ctx, int vcpu);
304 304
305 305 /* CPU topology */
306 306 int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores,
307 307 uint16_t threads, uint16_t maxcpus);
308 308 int vm_get_topology(struct vmctx *ctx, uint16_t *sockets, uint16_t *cores,
309 309 uint16_t *threads, uint16_t *maxcpus);
310 310
311 311 #ifndef __FreeBSD__
312 312 /* illumos-specific APIs */
313 313 int vm_pmtmr_set_location(struct vmctx *ctx, uint16_t ioport);
314 314 int vm_wrlock_cycle(struct vmctx *ctx);
315 315 int vm_get_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state *state,
316 316 uint8_t *sipi_vector);
317 317 int vm_set_run_state(struct vmctx *ctx, int vcpu, enum vcpu_run_state state,
318 318 uint8_t sipi_vector);
319 -int vm_arc_resv(struct vmctx *ctx, size_t);
320 319 #endif /* __FreeBSD__ */
321 320
322 321 #ifdef __FreeBSD__
323 322 /*
324 323 * FreeBSD specific APIs
325 324 */
326 325 int vm_setup_freebsd_registers(struct vmctx *ctx, int vcpu,
327 326 uint64_t rip, uint64_t cr3, uint64_t gdtbase,
328 327 uint64_t rsp);
329 328 int vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu,
330 329 uint32_t eip, uint32_t gdtbase,
331 330 uint32_t esp);
332 331 void vm_setup_freebsd_gdt(uint64_t *gdtr);
333 332 #endif
334 333 #endif /* _VMMAPI_H_ */
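
For context, a minimal consumer of this header might look like the sketch below. It is not part of the webrev; it only calls functions declared above (vm_open(), vm_setup_memory(), vm_map_gpa(), vm_close()) on the non-FreeBSD (illumos) side of the #ifdefs. The include path <vmmapi.h>, the instance name "testvm", and the 256 MiB memory size are assumptions for illustration.

/*
 * Usage sketch: attach to an existing bhyve instance, map its guest
 * memory into this process, and release the handle again.
 */
#include <stdio.h>
#include <stdlib.h>

#include <vmmapi.h>	/* assumed install location of this header */

int
main(void)
{
	struct vmctx *ctx;
	void *gpa0;

	/* Attach to an instance previously created with vm_create(). */
	ctx = vm_open("testvm");
	if (ctx == NULL) {
		perror("vm_open");
		return (EXIT_FAILURE);
	}

	/* Allocate 256 MiB of guest memory, fully mapped (VM_MMAP_ALL). */
	if (vm_setup_memory(ctx, 256 * 1024 * 1024, VM_MMAP_ALL) != 0) {
		perror("vm_setup_memory");
		vm_close(ctx);
		return (EXIT_FAILURE);
	}

	/* Translate guest-physical address 0 into a host pointer. */
	gpa0 = vm_map_gpa(ctx, 0, 4096);
	(void) printf("guest page 0 mapped at %p\n", gpa0);

	/* illumos-only: release the handle without destroying the VM. */
	vm_close(ctx);
	return (EXIT_SUCCESS);
}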