/* $OpenBSD: vmmvar.h,v 1.100 2024/04/09 21:55:16 dv Exp $ */
/*
 * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * CPU capabilities for VMM operation
 */
#ifndef _MACHINE_VMMVAR_H_
#define _MACHINE_VMMVAR_H_

#define VMM_HV_SIGNATURE        "OpenBSDVMM58"

#define VMM_MAX_MEM_RANGES      16
#define VMM_MAX_DISKS_PER_VM    4
#define VMM_MAX_NAME_LEN        64
#define VMM_MAX_VCPUS           512
#define VMM_MAX_VCPUS_PER_VM    64
#define VMM_MAX_VM_MEM_SIZE     128L * 1024 * 1024 * 1024
#define VMM_MAX_NICS_PER_VM     4

#define VMM_PCI_MMIO_BAR_BASE   0xF0000000ULL
#define VMM_PCI_MMIO_BAR_END    0xFFDFFFFFULL          /* 2 MiB below 4 GiB */

/* VMX: Basic Exit Reasons */
#define VMX_EXIT_NMI                            0
#define VMX_EXIT_EXTINT                         1
#define VMX_EXIT_TRIPLE_FAULT                   2
#define VMX_EXIT_INIT                           3
#define VMX_EXIT_SIPI                           4
#define VMX_EXIT_IO_SMI                         5
#define VMX_EXIT_OTHER_SMI                      6
#define VMX_EXIT_INT_WINDOW                     7
#define VMX_EXIT_NMI_WINDOW                     8
#define VMX_EXIT_TASK_SWITCH                    9
#define VMX_EXIT_CPUID                          10
#define VMX_EXIT_GETSEC                         11
#define VMX_EXIT_HLT                            12
#define VMX_EXIT_INVD                           13
#define VMX_EXIT_INVLPG                         14
#define VMX_EXIT_RDPMC                          15
#define VMX_EXIT_RDTSC                          16
#define VMX_EXIT_RSM                            17
#define VMX_EXIT_VMCALL                         18
#define VMX_EXIT_VMCLEAR                        19
#define VMX_EXIT_VMLAUNCH                       20
#define VMX_EXIT_VMPTRLD                        21
#define VMX_EXIT_VMPTRST                        22
#define VMX_EXIT_VMREAD                         23
#define VMX_EXIT_VMRESUME                       24
#define VMX_EXIT_VMWRITE                        25
#define VMX_EXIT_VMXOFF                         26
#define VMX_EXIT_VMXON                          27
#define VMX_EXIT_CR_ACCESS                      28
#define VMX_EXIT_MOV_DR                         29
#define VMX_EXIT_IO                             30
#define VMX_EXIT_RDMSR                          31
#define VMX_EXIT_WRMSR                          32
#define VMX_EXIT_ENTRY_FAILED_GUEST_STATE       33
#define VMX_EXIT_ENTRY_FAILED_MSR_LOAD          34
#define VMX_EXIT_MWAIT                          36
#define VMX_EXIT_MTF                            37
#define VMX_EXIT_MONITOR                        39
#define VMX_EXIT_PAUSE                          40
#define VMX_EXIT_ENTRY_FAILED_MCE               41
#define VMX_EXIT_TPR_BELOW_THRESHOLD            43
#define VMX_EXIT_APIC_ACCESS                    44
#define VMX_EXIT_VIRTUALIZED_EOI                45
#define VMX_EXIT_GDTR_IDTR                      46
#define VMX_EXIT_LDTR_TR                        47
#define VMX_EXIT_EPT_VIOLATION                  48
#define VMX_EXIT_EPT_MISCONFIGURATION           49
#define VMX_EXIT_INVEPT                         50
#define VMX_EXIT_RDTSCP                         51
#define VMX_EXIT_VMX_PREEMPTION_TIMER_EXPIRED   52
#define VMX_EXIT_INVVPID                        53
#define VMX_EXIT_WBINVD                         54
#define VMX_EXIT_XSETBV                         55
#define VMX_EXIT_APIC_WRITE                     56
#define VMX_EXIT_RDRAND                         57
#define VMX_EXIT_INVPCID                        58
#define VMX_EXIT_VMFUNC                         59
#define VMX_EXIT_RDSEED                         61
#define VMX_EXIT_XSAVES                         63
#define VMX_EXIT_XRSTORS                        64

/*
 * VMX: Misc defines
 */
#define VMX_MAX_CR3_TARGETS     256
#define VMX_VMCS_PA_CLEAR       0xFFFFFFFFFFFFFFFFUL

#define VM_EXIT_TERMINATED      0xFFFE
#define VM_EXIT_NONE            0xFFFF

/*
 * SVM: Intercept codes (exit reasons)
 */
#define SVM_VMEXIT_CR0_READ             0x00
#define SVM_VMEXIT_CR1_READ             0x01
#define SVM_VMEXIT_CR2_READ             0x02
#define SVM_VMEXIT_CR3_READ             0x03
#define SVM_VMEXIT_CR4_READ             0x04
#define SVM_VMEXIT_CR5_READ             0x05
#define SVM_VMEXIT_CR6_READ             0x06
#define SVM_VMEXIT_CR7_READ             0x07
#define SVM_VMEXIT_CR8_READ             0x08
#define SVM_VMEXIT_CR9_READ             0x09
#define SVM_VMEXIT_CR10_READ            0x0A
#define SVM_VMEXIT_CR11_READ            0x0B
#define SVM_VMEXIT_CR12_READ            0x0C
#define SVM_VMEXIT_CR13_READ            0x0D
#define SVM_VMEXIT_CR14_READ            0x0E
#define SVM_VMEXIT_CR15_READ            0x0F
#define SVM_VMEXIT_CR0_WRITE            0x10
#define SVM_VMEXIT_CR1_WRITE            0x11
#define SVM_VMEXIT_CR2_WRITE            0x12
#define SVM_VMEXIT_CR3_WRITE            0x13
#define SVM_VMEXIT_CR4_WRITE            0x14
#define SVM_VMEXIT_CR5_WRITE            0x15
#define SVM_VMEXIT_CR6_WRITE            0x16
#define SVM_VMEXIT_CR7_WRITE            0x17
#define SVM_VMEXIT_CR8_WRITE            0x18
#define SVM_VMEXIT_CR9_WRITE            0x19
#define SVM_VMEXIT_CR10_WRITE           0x1A
#define SVM_VMEXIT_CR11_WRITE           0x1B
#define SVM_VMEXIT_CR12_WRITE           0x1C
#define SVM_VMEXIT_CR13_WRITE           0x1D
#define SVM_VMEXIT_CR14_WRITE           0x1E
#define SVM_VMEXIT_CR15_WRITE           0x1F
#define SVM_VMEXIT_DR0_READ             0x20
#define SVM_VMEXIT_DR1_READ             0x21
#define SVM_VMEXIT_DR2_READ             0x22
#define SVM_VMEXIT_DR3_READ             0x23
#define SVM_VMEXIT_DR4_READ             0x24
#define SVM_VMEXIT_DR5_READ             0x25
#define SVM_VMEXIT_DR6_READ             0x26
#define SVM_VMEXIT_DR7_READ             0x27
#define SVM_VMEXIT_DR8_READ             0x28
#define SVM_VMEXIT_DR9_READ             0x29
#define SVM_VMEXIT_DR10_READ            0x2A
#define SVM_VMEXIT_DR11_READ            0x2B
#define SVM_VMEXIT_DR12_READ            0x2C
#define SVM_VMEXIT_DR13_READ            0x2D
#define SVM_VMEXIT_DR14_READ            0x2E
#define SVM_VMEXIT_DR15_READ            0x2F
#define SVM_VMEXIT_DR0_WRITE            0x30
#define SVM_VMEXIT_DR1_WRITE            0x31
#define SVM_VMEXIT_DR2_WRITE            0x32
#define SVM_VMEXIT_DR3_WRITE            0x33
#define SVM_VMEXIT_DR4_WRITE            0x34
#define SVM_VMEXIT_DR5_WRITE            0x35
#define SVM_VMEXIT_DR6_WRITE            0x36
#define SVM_VMEXIT_DR7_WRITE            0x37
#define SVM_VMEXIT_DR8_WRITE            0x38
#define SVM_VMEXIT_DR9_WRITE            0x39
#define SVM_VMEXIT_DR10_WRITE           0x3A
#define SVM_VMEXIT_DR11_WRITE           0x3B
#define SVM_VMEXIT_DR12_WRITE           0x3C
#define SVM_VMEXIT_DR13_WRITE           0x3D
#define SVM_VMEXIT_DR14_WRITE           0x3E
#define SVM_VMEXIT_DR15_WRITE           0x3F
#define SVM_VMEXIT_EXCP0                0x40
#define SVM_VMEXIT_EXCP1                0x41
#define SVM_VMEXIT_EXCP2                0x42
#define SVM_VMEXIT_EXCP3                0x43
#define SVM_VMEXIT_EXCP4                0x44
#define SVM_VMEXIT_EXCP5                0x45
#define SVM_VMEXIT_EXCP6                0x46
#define SVM_VMEXIT_EXCP7                0x47
#define SVM_VMEXIT_EXCP8                0x48
#define SVM_VMEXIT_EXCP9                0x49
#define SVM_VMEXIT_EXCP10               0x4A
#define SVM_VMEXIT_EXCP11               0x4B
#define SVM_VMEXIT_EXCP12               0x4C
#define SVM_VMEXIT_EXCP13               0x4D
#define SVM_VMEXIT_EXCP14               0x4E
#define SVM_VMEXIT_EXCP15               0x4F
#define SVM_VMEXIT_EXCP16               0x50
#define SVM_VMEXIT_EXCP17               0x51
#define SVM_VMEXIT_EXCP18               0x52
#define SVM_VMEXIT_EXCP19               0x53
#define SVM_VMEXIT_EXCP20               0x54
#define SVM_VMEXIT_EXCP21               0x55
#define SVM_VMEXIT_EXCP22               0x56
#define SVM_VMEXIT_EXCP23               0x57
#define SVM_VMEXIT_EXCP24               0x58
#define SVM_VMEXIT_EXCP25               0x59
#define SVM_VMEXIT_EXCP26               0x5A
#define SVM_VMEXIT_EXCP27               0x5B
#define SVM_VMEXIT_EXCP28               0x5C
#define SVM_VMEXIT_EXCP29               0x5D
#define SVM_VMEXIT_EXCP30               0x5E
#define SVM_VMEXIT_EXCP31               0x5F
#define SVM_VMEXIT_INTR                 0x60
#define SVM_VMEXIT_NMI                  0x61
#define SVM_VMEXIT_SMI                  0x62
#define SVM_VMEXIT_INIT                 0x63
#define SVM_VMEXIT_VINTR                0x64
#define SVM_VMEXIT_CR0_SEL_WRITE        0x65
#define SVM_VMEXIT_IDTR_READ            0x66
#define SVM_VMEXIT_GDTR_READ            0x67
#define SVM_VMEXIT_LDTR_READ            0x68
#define SVM_VMEXIT_TR_READ              0x69
#define SVM_VMEXIT_IDTR_WRITE           0x6A
#define SVM_VMEXIT_GDTR_WRITE           0x6B
#define SVM_VMEXIT_LDTR_WRITE           0x6C
#define SVM_VMEXIT_TR_WRITE             0x6D
#define SVM_VMEXIT_RDTSC                0x6E
#define SVM_VMEXIT_RDPMC                0x6F
#define SVM_VMEXIT_PUSHF                0x70
#define SVM_VMEXIT_POPF                 0x71
#define SVM_VMEXIT_CPUID                0x72
#define SVM_VMEXIT_RSM                  0x73
#define SVM_VMEXIT_IRET                 0x74
#define SVM_VMEXIT_SWINT                0x75
#define SVM_VMEXIT_INVD                 0x76
#define SVM_VMEXIT_PAUSE                0x77
#define SVM_VMEXIT_HLT                  0x78
#define SVM_VMEXIT_INVLPG               0x79
#define SVM_VMEXIT_INVLPGA              0x7A
#define SVM_VMEXIT_IOIO                 0x7B
#define SVM_VMEXIT_MSR                  0x7C
#define SVM_VMEXIT_TASK_SWITCH          0x7D
#define SVM_VMEXIT_FERR_FREEZE          0x7E
#define SVM_VMEXIT_SHUTDOWN             0x7F
#define SVM_VMEXIT_VMRUN                0x80
#define SVM_VMEXIT_VMMCALL              0x81
#define SVM_VMEXIT_VMLOAD               0x82
#define SVM_VMEXIT_VMSAVE               0x83
#define SVM_VMEXIT_STGI                 0x84
#define SVM_VMEXIT_CLGI                 0x85
#define SVM_VMEXIT_SKINIT               0x86
#define SVM_VMEXIT_RDTSCP               0x87
#define SVM_VMEXIT_ICEBP                0x88
#define SVM_VMEXIT_WBINVD               0x89
#define SVM_VMEXIT_MONITOR              0x8A
#define SVM_VMEXIT_MWAIT                0x8B
#define SVM_VMEXIT_MWAIT_CONDITIONAL    0x8C
#define SVM_VMEXIT_XSETBV               0x8D
#define SVM_VMEXIT_EFER_WRITE_TRAP      0x8F
#define SVM_VMEXIT_CR0_WRITE_TRAP       0x90
#define SVM_VMEXIT_CR1_WRITE_TRAP       0x91
#define SVM_VMEXIT_CR2_WRITE_TRAP       0x92
#define SVM_VMEXIT_CR3_WRITE_TRAP       0x93
#define SVM_VMEXIT_CR4_WRITE_TRAP       0x94
#define SVM_VMEXIT_CR5_WRITE_TRAP       0x95
#define SVM_VMEXIT_CR6_WRITE_TRAP       0x96
#define SVM_VMEXIT_CR7_WRITE_TRAP       0x97
#define SVM_VMEXIT_CR8_WRITE_TRAP       0x98
#define SVM_VMEXIT_CR9_WRITE_TRAP       0x99
#define SVM_VMEXIT_CR10_WRITE_TRAP      0x9A
#define SVM_VMEXIT_CR11_WRITE_TRAP      0x9B
#define SVM_VMEXIT_CR12_WRITE_TRAP      0x9C
#define SVM_VMEXIT_CR13_WRITE_TRAP      0x9D
#define SVM_VMEXIT_CR14_WRITE_TRAP      0x9E
#define SVM_VMEXIT_CR15_WRITE_TRAP      0x9F
#define SVM_VMEXIT_NPF                  0x400
#define SVM_AVIC_INCOMPLETE_IPI         0x401
#define SVM_AVIC_NOACCEL                0x402
#define SVM_VMEXIT_VMGEXIT              0x403
#define SVM_VMEXIT_INVALID              -1

/*
 * Exception injection vectors (these correspond to the CPU exception types
 * defined in the SDM.)
 */
#define VMM_EX_DE       0       /* Divide Error #DE */
#define VMM_EX_DB       1       /* Debug Exception #DB */
#define VMM_EX_NMI      2       /* NMI */
#define VMM_EX_BP       3       /* Breakpoint #BP */
#define VMM_EX_OF       4       /* Overflow #OF */
#define VMM_EX_BR       5       /* Bound range exceeded #BR */
#define VMM_EX_UD       6       /* Undefined opcode #UD */
#define VMM_EX_NM       7       /* Device not available #NM */
#define VMM_EX_DF       8       /* Double fault #DF */
#define VMM_EX_CP       9       /* Coprocessor segment overrun (unused) */
#define VMM_EX_TS       10      /* Invalid TSS #TS */
#define VMM_EX_NP       11      /* Segment not present #NP */
#define VMM_EX_SS       12      /* Stack segment fault #SS */
#define VMM_EX_GP       13      /* General protection #GP */
#define VMM_EX_PF       14      /* Page fault #PF */
#define VMM_EX_MF       16      /* x87 FPU floating point error #MF */
#define VMM_EX_AC       17      /* Alignment check #AC */
#define VMM_EX_MC       18      /* Machine check #MC */
#define VMM_EX_XM       19      /* SIMD floating point exception #XM */
#define VMM_EX_VE       20      /* Virtualization exception #VE */

enum {
        VEI_DIR_OUT,
        VEI_DIR_IN
};

enum {
        VEE_FAULT_INVALID = 0,
        VEE_FAULT_HANDLED,
        VEE_FAULT_MMIO_ASSIST,
        VEE_FAULT_PROTECT,
};

enum {
        VMM_CPU_MODE_REAL,
        VMM_CPU_MODE_PROT,
        VMM_CPU_MODE_PROT32,
        VMM_CPU_MODE_COMPAT,
        VMM_CPU_MODE_LONG,
        VMM_CPU_MODE_UNKNOWN,
};

struct vmm_softc_md {
        /* Capabilities */
        uint32_t        nr_rvi_cpus;    /* [I] */
        uint32_t        nr_ept_cpus;    /* [I] */
        uint8_t         pkru_enabled;   /* [I] */
};

/*
 * vm exit data
 *  vm_exit_inout : describes an IN/OUT exit
 */
struct vm_exit_inout {
        uint8_t         vei_size;       /* Size of access */
        uint8_t         vei_dir;        /* Direction */
        uint8_t         vei_rep;        /* REP prefix? */
        uint8_t         vei_string;     /* string variety? */
        uint8_t         vei_encoding;   /* operand encoding */
        uint16_t        vei_port;       /* port */
        uint32_t        vei_data;       /* data */
        uint8_t         vei_insn_len;   /* Count of instruction bytes */
};
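
/*
 * Illustrative sketch (not part of this header): how a userland VM exit
 * handler, e.g. in vmd(8), might consume a struct vm_exit_inout after an
 * I/O exit.  The example_port_read/example_port_write helpers are
 * hypothetical stand-ins for device emulation dispatch.
 */
#if 0   /* example only */
static void
example_handle_inout(struct vm_exit_inout *vei)
{
        if (vei->vei_dir == VEI_DIR_OUT) {
                /* Guest wrote vei_size bytes of vei_data to vei_port. */
                example_port_write(vei->vei_port, vei->vei_data,
                    vei->vei_size);
        } else {
                /* Guest read: provide the value to hand back to the guest. */
                vei->vei_data = example_port_read(vei->vei_port,
                    vei->vei_size);
        }
}
#endif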

/*
 * vm_exit_eptviolation : describes an EPT VIOLATION exit
 */
struct vm_exit_eptviolation {
        uint8_t         vee_fault_type;         /* type of vm exit */
        uint8_t         vee_insn_info;          /* bitfield */
#define VEE_LEN_VALID           0x1             /* vee_insn_len is valid */
#define VEE_BYTES_VALID         0x2             /* vee_insn_bytes is valid */
        uint8_t         vee_insn_len;           /* [VMX] instruction length */
        uint8_t         vee_insn_bytes[15];     /* [SVM] bytes at {R,E,}IP */
};

/*
 * struct vcpu_inject_event : describes an exception or interrupt to inject.
 */
struct vcpu_inject_event {
        uint8_t         vie_vector;     /* Exception or interrupt vector. */
        uint32_t        vie_errorcode;  /* Optional error code. */
        uint8_t         vie_type;
#define VCPU_INJECT_NONE        0
#define VCPU_INJECT_INTR        1       /* External hardware interrupt. */
#define VCPU_INJECT_EX          2       /* HW or SW Exception */
#define VCPU_INJECT_NMI         3       /* Non-maskable Interrupt */
};
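
/*
 * Illustrative sketch: filling in a vcpu_inject_event to queue a general
 * protection fault with a zero error code.  How the event reaches vmm(4),
 * e.g. through vrp_inject in struct vm_run_params further below, is up to
 * the caller; this only shows how the fields fit together.
 */
#if 0   /* example only */
static void
example_queue_gp_fault(struct vcpu_inject_event *vie)
{
        vie->vie_vector = VMM_EX_GP;
        vie->vie_errorcode = 0;
        vie->vie_type = VCPU_INJECT_EX;
}
#endif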

/*
 * struct vcpu_segment_info
 *
 * Describes a segment + selector set, used in constructing the initial vcpu
 * register content
 */
struct vcpu_segment_info {
        uint16_t        vsi_sel;
        uint32_t        vsi_limit;
        uint32_t        vsi_ar;
        uint64_t        vsi_base;
};

/* The GPRS are ordered to assist instruction decode. */
#define VCPU_REGS_RAX           0
#define VCPU_REGS_RCX           1
#define VCPU_REGS_RDX           2
#define VCPU_REGS_RBX           3
#define VCPU_REGS_RSP           4
#define VCPU_REGS_RBP           5
#define VCPU_REGS_RSI           6
#define VCPU_REGS_RDI           7
#define VCPU_REGS_R8            8
#define VCPU_REGS_R9            9
#define VCPU_REGS_R10           10
#define VCPU_REGS_R11           11
#define VCPU_REGS_R12           12
#define VCPU_REGS_R13           13
#define VCPU_REGS_R14           14
#define VCPU_REGS_R15           15
#define VCPU_REGS_RIP           16
#define VCPU_REGS_RFLAGS        17
#define VCPU_REGS_NGPRS         (VCPU_REGS_RFLAGS + 1)

#define VCPU_REGS_CR0           0
#define VCPU_REGS_CR2           1
#define VCPU_REGS_CR3           2
#define VCPU_REGS_CR4           3
#define VCPU_REGS_CR8           4
#define VCPU_REGS_XCR0          5
#define VCPU_REGS_PDPTE0        6
#define VCPU_REGS_PDPTE1        7
#define VCPU_REGS_PDPTE2        8
#define VCPU_REGS_PDPTE3        9
#define VCPU_REGS_NCRS          (VCPU_REGS_PDPTE3 + 1)

#define VCPU_REGS_ES            0
#define VCPU_REGS_CS            1
#define VCPU_REGS_SS            2
#define VCPU_REGS_DS            3
#define VCPU_REGS_FS            4
#define VCPU_REGS_GS            5
#define VCPU_REGS_LDTR          6
#define VCPU_REGS_TR            7
#define VCPU_REGS_NSREGS        (VCPU_REGS_TR + 1)

#define VCPU_REGS_EFER          0
#define VCPU_REGS_STAR          1
#define VCPU_REGS_LSTAR         2
#define VCPU_REGS_CSTAR         3
#define VCPU_REGS_SFMASK        4
#define VCPU_REGS_KGSBASE       5
#define VCPU_REGS_MISC_ENABLE   6
#define VCPU_REGS_NMSRS         (VCPU_REGS_MISC_ENABLE + 1)

#define VCPU_REGS_DR0           0
#define VCPU_REGS_DR1           1
#define VCPU_REGS_DR2           2
#define VCPU_REGS_DR3           3
#define VCPU_REGS_DR6           4
#define VCPU_REGS_DR7           5
#define VCPU_REGS_NDRS          (VCPU_REGS_DR7 + 1)

struct vcpu_reg_state {
        uint64_t                        vrs_gprs[VCPU_REGS_NGPRS];
        uint64_t                        vrs_crs[VCPU_REGS_NCRS];
        uint64_t                        vrs_msrs[VCPU_REGS_NMSRS];
        uint64_t                        vrs_drs[VCPU_REGS_NDRS];
        struct vcpu_segment_info        vrs_sregs[VCPU_REGS_NSREGS];
        struct vcpu_segment_info        vrs_gdtr;
        struct vcpu_segment_info        vrs_idtr;
};
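
/*
 * Illustrative sketch: building a minimal real-mode register state that
 * resembles the architectural reset values.  The authoritative reset
 * tables live in vmm(4)/vmd(8), not in this header; vsi_ar carries the
 * hidden access-rights bits of the segment descriptor.
 */
#if 0   /* example only */
static struct vcpu_reg_state
example_reset_regs(void)
{
        struct vcpu_reg_state vrs = { 0 };

        vrs.vrs_gprs[VCPU_REGS_RIP] = 0xfff0;
        vrs.vrs_gprs[VCPU_REGS_RFLAGS] = 0x2;
        vrs.vrs_crs[VCPU_REGS_CR0] = 0x60000010;
        vrs.vrs_sregs[VCPU_REGS_CS] = (struct vcpu_segment_info){
                .vsi_sel = 0xf000,
                .vsi_limit = 0xffff,
                .vsi_ar = 0x9b,         /* present, code, read/exec, accessed */
                .vsi_base = 0xffff0000,
        };

        return vrs;
}
#endif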

#define VCPU_HOST_REGS_EFER             0
#define VCPU_HOST_REGS_STAR             1
#define VCPU_HOST_REGS_LSTAR            2
#define VCPU_HOST_REGS_CSTAR            3
#define VCPU_HOST_REGS_SFMASK           4
#define VCPU_HOST_REGS_KGSBASE          5
#define VCPU_HOST_REGS_MISC_ENABLE      6
#define VCPU_HOST_REGS_NMSRS            (VCPU_HOST_REGS_MISC_ENABLE + 1)

/*
 * struct vm_exit
 *
 * Contains VM exit information communicated to vmd(8). This information is
 * gathered by vmm(4) from the CPU on each exit that requires help from vmd.
 */
struct vm_exit {
        union {
                struct vm_exit_inout            vei;    /* IN/OUT exit */
                struct vm_exit_eptviolation     vee;    /* EPT VIOLATION exit */
        };

        struct vcpu_reg_state   vrs;
        int                     cpl;
};

struct vm_run_params {
        /* Input parameters to VMM_IOC_RUN */
        uint32_t        vrp_vm_id;
        uint32_t        vrp_vcpu_id;
        uint8_t         vrp_continue;           /* Continuing from an exit */
        struct vcpu_inject_event        vrp_inject;
        uint8_t         vrp_intr_pending;       /* Additional intrs pending? */

        /* Input/output parameter to VMM_IOC_RUN */
        struct vm_exit  *vrp_exit;              /* updated exit data */

        /* Output parameter from VMM_IOC_RUN */
        uint16_t        vrp_exit_reason;        /* exit reason */
        uint8_t         vrp_irqready;           /* ready for IRQ on entry */
};

struct vm_intr_params {
        /* Input parameters to VMM_IOC_INTR */
        uint32_t        vip_vm_id;
        uint32_t        vip_vcpu_id;
        uint16_t        vip_intr;
};

#define VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA 0x1    /* read/write pvclock gpa */
#define VM_RWVMPARAMS_PVCLOCK_VERSION    0x2    /* read/write pvclock version */
#define VM_RWVMPARAMS_ALL       (VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA | \
    VM_RWVMPARAMS_PVCLOCK_VERSION)

struct vm_rwvmparams_params {
        /* Input parameters to VMM_IOC_READVMPARAMS/VMM_IOC_WRITEVMPARAMS */
        uint32_t        vpp_vm_id;
        uint32_t        vpp_vcpu_id;
        uint32_t        vpp_mask;
        paddr_t         vpp_pvclock_system_gpa;
        uint32_t        vpp_pvclock_version;
};

#define VM_RWREGS_GPRS  0x1     /* read/write GPRs */
#define VM_RWREGS_SREGS 0x2     /* read/write segment registers */
#define VM_RWREGS_CRS   0x4     /* read/write CRs */
#define VM_RWREGS_MSRS  0x8     /* read/write MSRs */
#define VM_RWREGS_DRS   0x10    /* read/write DRs */
#define VM_RWREGS_ALL   (VM_RWREGS_GPRS | VM_RWREGS_SREGS | VM_RWREGS_CRS | \
    VM_RWREGS_MSRS | VM_RWREGS_DRS)

struct vm_rwregs_params {
        /*
         * Input/output parameters to VMM_IOC_READREGS /
         * VMM_IOC_WRITEREGS
         */
        uint32_t                vrwp_vm_id;
        uint32_t                vrwp_vcpu_id;
        uint64_t                vrwp_mask;
        struct vcpu_reg_state   vrwp_regs;
};
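
/*
 * Illustrative sketch: fetching the complete register state of vcpu 0.
 * The ioctl commands (VMM_IOC_READREGS / VMM_IOC_WRITEREGS, referenced in
 * the comment above) are defined in the MI vmm headers rather than here;
 * <sys/ioctl.h> and <err.h> are assumed, "fd" is an open descriptor to the
 * vmm device and "vm_id" a valid VM identifier.
 */
#if 0   /* example only */
static void
example_read_regs(int fd, uint32_t vm_id)
{
        struct vm_rwregs_params vrwp = {
                .vrwp_vm_id = vm_id,
                .vrwp_vcpu_id = 0,
                .vrwp_mask = VM_RWREGS_ALL,
        };

        if (ioctl(fd, VMM_IOC_READREGS, &vrwp) == -1)
                err(1, "VMM_IOC_READREGS");
        /* vrwp.vrwp_regs now holds GPRs, segment regs, CRs, MSRs and DRs. */
}
#endif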

struct vm_mprotect_ept_params {
        /* Input parameters to VMM_IOC_MPROTECT_EPT */
        uint32_t        vmep_vm_id;
        uint32_t        vmep_vcpu_id;
        vaddr_t         vmep_sgpa;
        size_t          vmep_size;
        int             vmep_prot;
};

/* IOCTL definitions */
#define VMM_IOC_INTR _IOW('V', 6, struct vm_intr_params) /* Intr pending */
/* Control the protection of ept pages */
#define VMM_IOC_MPROTECT_EPT _IOW('V', 11, struct vm_mprotect_ept_params)
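
/*
 * Illustrative sketch: removing write permission from one page of guest
 * physical memory via VMM_IOC_MPROTECT_EPT.  "fd", "vm_id" and "gpa" are
 * placeholders supplied by the caller; the PROT_* flags come from
 * <sys/mman.h>, and PAGE_SIZE is assumed to be visible to the caller.
 */
#if 0   /* example only */
static void
example_write_protect_page(int fd, uint32_t vm_id, vaddr_t gpa)
{
        struct vm_mprotect_ept_params vmep = {
                .vmep_vm_id = vm_id,
                .vmep_vcpu_id = 0,
                .vmep_sgpa = gpa,       /* page-aligned guest-physical address */
                .vmep_size = PAGE_SIZE,
                .vmep_prot = PROT_READ | PROT_EXEC,
        };

        if (ioctl(fd, VMM_IOC_MPROTECT_EPT, &vmep) == -1)
                err(1, "VMM_IOC_MPROTECT_EPT");
}
#endif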

/* CPUID masks */
/*
 * clone host capabilities minus:
 *  debug store (CPUIDECX_DTES64, CPUIDECX_DSCPL, CPUID_DS)
 *  monitor/mwait (CPUIDECX_MWAIT, CPUIDECX_MWAITX)
 *  vmx/svm (CPUIDECX_VMX, CPUIDECX_SVM)
 *  smx (CPUIDECX_SMX)
 *  speedstep (CPUIDECX_EST)
 *  thermal (CPUIDECX_TM2, CPUID_ACPI, CPUID_TM)
 *  context id (CPUIDECX_CNXTID)
 *  machine check (CPUID_MCE, CPUID_MCA)
 *  silicon debug (CPUIDECX_SDBG)
 *  xTPR (CPUIDECX_XTPR)
 *  perf/debug (CPUIDECX_PDCM)
 *  pcid (CPUIDECX_PCID)
 *  direct cache access (CPUIDECX_DCA)
 *  x2APIC (CPUIDECX_X2APIC)
 *  apic deadline (CPUIDECX_DEADLINE)
 *  apic (CPUID_APIC)
 *  psn (CPUID_PSN)
 *  self snoop (CPUID_SS)
 *  hyperthreading (CPUID_HTT)
 *  pending break enabled (CPUID_PBE)
 *  MTRR (CPUID_MTRR)
 *  Speculative execution control features (AMD)
 */
#define VMM_CPUIDECX_MASK ~(CPUIDECX_EST | CPUIDECX_TM2 | CPUIDECX_MWAIT | \
    CPUIDECX_PDCM | CPUIDECX_VMX | CPUIDECX_DTES64 | \
    CPUIDECX_DSCPL | CPUIDECX_SMX | CPUIDECX_CNXTID | \
    CPUIDECX_SDBG | CPUIDECX_XTPR | CPUIDECX_PCID | \
    CPUIDECX_DCA | CPUIDECX_X2APIC | CPUIDECX_DEADLINE)
#define VMM_ECPUIDECX_MASK ~(CPUIDECX_SVM | CPUIDECX_MWAITX)
#define VMM_CPUIDEDX_MASK ~(CPUID_ACPI | CPUID_TM | \
    CPUID_HTT | CPUID_DS | CPUID_APIC | \
    CPUID_PSN | CPUID_SS | CPUID_PBE | \
    CPUID_MTRR | CPUID_MCE | CPUID_MCA)
#define VMM_AMDSPEC_EBX_MASK ~(CPUIDEBX_IBPB | CPUIDEBX_IBRS | \
    CPUIDEBX_STIBP | CPUIDEBX_IBRS_ALWAYSON | CPUIDEBX_STIBP_ALWAYSON | \
    CPUIDEBX_IBRS_PREF | CPUIDEBX_SSBD | CPUIDEBX_VIRT_SSBD | \
    CPUIDEBX_SSBD_NOTREQ)

/* This mask is an include list for bits we want to expose */
#define VMM_APMI_EDX_INCLUDE_MASK (CPUIDEDX_ITSC)

/*
 * SEFF flags - copy from host minus:
 *  TSC_ADJUST (SEFF0EBX_TSC_ADJUST)
 *  SGX (SEFF0EBX_SGX)
 *  HLE (SEFF0EBX_HLE)
 *  INVPCID (SEFF0EBX_INVPCID)
 *  RTM (SEFF0EBX_RTM)
 *  PQM (SEFF0EBX_PQM)
 *  AVX512F (SEFF0EBX_AVX512F)
 *  AVX512DQ (SEFF0EBX_AVX512DQ)
 *  AVX512IFMA (SEFF0EBX_AVX512IFMA)
 *  AVX512PF (SEFF0EBX_AVX512PF)
 *  AVX512ER (SEFF0EBX_AVX512ER)
 *  AVX512CD (SEFF0EBX_AVX512CD)
 *  AVX512BW (SEFF0EBX_AVX512BW)
 *  AVX512VL (SEFF0EBX_AVX512VL)
 *  MPX (SEFF0EBX_MPX)
 *  PCOMMIT (SEFF0EBX_PCOMMIT)
 *  PT (SEFF0EBX_PT)
 */
#define VMM_SEFF0EBX_MASK ~(SEFF0EBX_TSC_ADJUST | SEFF0EBX_SGX | \
    SEFF0EBX_HLE | SEFF0EBX_INVPCID | \
    SEFF0EBX_RTM | SEFF0EBX_PQM | SEFF0EBX_MPX | \
    SEFF0EBX_PCOMMIT | SEFF0EBX_PT | \
    SEFF0EBX_AVX512F | SEFF0EBX_AVX512DQ | \
    SEFF0EBX_AVX512IFMA | SEFF0EBX_AVX512PF | \
    SEFF0EBX_AVX512ER | SEFF0EBX_AVX512CD | \
    SEFF0EBX_AVX512BW | SEFF0EBX_AVX512VL)

/* ECX mask contains the bits to include */
#define VMM_SEFF0ECX_MASK (SEFF0ECX_UMIP)

/* EDX mask contains the bits to include */
#define VMM_SEFF0EDX_MASK (SEFF0EDX_MD_CLEAR)

/*
 * Extended function flags - copy from host minus:
 *  0x80000001  EDX:RDTSCP Support
 */
#define VMM_FEAT_EFLAGS_MASK ~(CPUID_RDTSCP)

/*
 * CPUID[0x4] deterministic cache info
 */
#define VMM_CPUID4_CACHE_TOPOLOGY_MASK  0x3FF

#ifdef _KERNEL

#define VMX_FAIL_LAUNCH_UNKNOWN         1
#define VMX_FAIL_LAUNCH_INVALID_VMCS    2
#define VMX_FAIL_LAUNCH_VALID_VMCS      3

/* MSR bitmap manipulation macros */
#define VMX_MSRIDX(m)           ((m) / 8)
#define VMX_MSRBIT(m)           (1 << (m) % 8)

#define SVM_MSRIDX(m)           ((m) / 4)
#define SVM_MSRBIT_R(m)         (1 << (((m) % 4) * 2))
#define SVM_MSRBIT_W(m)         (1 << (((m) % 4) * 2 + 1))
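
/*
 * Illustrative sketch: how the macros above address a permission bit.
 * VMX keeps one bit per MSR (8 MSRs per byte); SVM keeps a read and a
 * write bit per MSR (4 MSRs per byte).  A clear bit permits the access
 * without a VM exit.  The MSR-range offset handling that vmm(4) applies
 * to the raw MSR number before indexing is omitted here.
 */
#if 0   /* example only */
static inline void
example_vmx_allow_msr(uint8_t *bitmap, uint32_t msr)
{
        bitmap[VMX_MSRIDX(msr)] &= ~VMX_MSRBIT(msr);
}

static inline void
example_svm_allow_msr_rw(uint8_t *bitmap, uint32_t msr)
{
        bitmap[SVM_MSRIDX(msr)] &= ~(SVM_MSRBIT_R(msr) | SVM_MSRBIT_W(msr));
}
#endif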

enum {
        VMM_MODE_UNKNOWN,
        VMM_MODE_EPT,
        VMM_MODE_RVI
};

enum {
        VMM_MEM_TYPE_REGULAR,
        VMM_MEM_TYPE_MMIO,
        VMM_MEM_TYPE_UNKNOWN
};

/* Forward declarations */
struct vm;

/*
 * Implementation-specific cpu state
 */
struct vmcb_segment {
        uint16_t        vs_sel;                 /* 000h */
        uint16_t        vs_attr;                /* 002h */
        uint32_t        vs_lim;                 /* 004h */
        uint64_t        vs_base;                /* 008h */
};

struct vmcb {
        union {
                struct {
                        uint32_t        v_cr_rw;                /* 000h */
                        uint32_t        v_dr_rw;                /* 004h */
                        uint32_t        v_excp;                 /* 008h */
                        uint32_t        v_intercept1;           /* 00Ch */
                        uint32_t        v_intercept2;           /* 010h */
                        uint8_t         v_pad1[0x28];           /* 014h-03Bh */
                        uint16_t        v_pause_thr;            /* 03Ch */
                        uint16_t        v_pause_ct;             /* 03Eh */
                        uint64_t        v_iopm_pa;              /* 040h */
                        uint64_t        v_msrpm_pa;             /* 048h */
                        uint64_t        v_tsc_offset;           /* 050h */
                        uint32_t        v_asid;                 /* 058h */
                        uint8_t         v_tlb_control;          /* 05Ch */
                        uint8_t         v_pad2[0x3];            /* 05Dh-05Fh */
                        uint8_t         v_tpr;                  /* 060h */
                        uint8_t         v_irq;                  /* 061h */
                        uint8_t         v_intr_misc;            /* 062h */
                        uint8_t         v_intr_masking;         /* 063h */
                        uint8_t         v_intr_vector;          /* 064h */
                        uint8_t         v_pad3[0x3];            /* 065h-067h */
                        uint64_t        v_intr_shadow;          /* 068h */
                        uint64_t        v_exitcode;             /* 070h */
                        uint64_t        v_exitinfo1;            /* 078h */
                        uint64_t        v_exitinfo2;            /* 080h */
                        uint64_t        v_exitintinfo;          /* 088h */
                        uint64_t        v_np_enable;            /* 090h */
                        uint64_t        v_avic_apic_bar;        /* 098h */
                        uint64_t        v_pad4;                 /* 0A0h */
                        uint64_t        v_eventinj;             /* 0A8h */
                        uint64_t        v_n_cr3;                /* 0B0h */
                        uint64_t        v_lbr_virt_enable;      /* 0B8h */
                        uint64_t        v_vmcb_clean_bits;      /* 0C0h */
                        uint64_t        v_nrip;                 /* 0C8h */
                        uint8_t         v_n_bytes_fetched;      /* 0D0h */
                        uint8_t         v_guest_ins_bytes[0xf]; /* 0D1h-0DFh */
                        uint64_t        v_avic_apic_back_page;  /* 0E0h */
                        uint64_t        v_pad5;                 /* 0E8h-0EFh */
                        uint64_t        v_avic_logical_table;   /* 0F0h */
                        uint64_t        v_avic_phys;            /* 0F8h */
                };
                uint8_t vmcb_control[0x400];
        };

        union {
                struct {
                        /* Offsets here are relative to start of VMCB SSA */
                        struct vmcb_segment     v_es;           /* 000h */
                        struct vmcb_segment     v_cs;           /* 010h */
                        struct vmcb_segment     v_ss;           /* 020h */
                        struct vmcb_segment     v_ds;           /* 030h */
                        struct vmcb_segment     v_fs;           /* 040h */
                        struct vmcb_segment     v_gs;           /* 050h */
                        struct vmcb_segment     v_gdtr;         /* 060h */
                        struct vmcb_segment     v_ldtr;         /* 070h */
                        struct vmcb_segment     v_idtr;         /* 080h */
                        struct vmcb_segment     v_tr;           /* 090h */
                        uint8_t         v_pad6[0x2B];           /* 0A0h-0CAh */
                        uint8_t         v_cpl;                  /* 0CBh */
                        uint32_t        v_pad7;                 /* 0CCh-0CFh */
                        uint64_t        v_efer;                 /* 0D0h */
                        uint8_t         v_pad8[0x70];           /* 0D8h-147h */
                        uint64_t        v_cr4;                  /* 148h */
                        uint64_t        v_cr3;                  /* 150h */
                        uint64_t        v_cr0;                  /* 158h */
                        uint64_t        v_dr7;                  /* 160h */
                        uint64_t        v_dr6;                  /* 168h */
                        uint64_t        v_rflags;               /* 170h */
                        uint64_t        v_rip;                  /* 178h */
                        uint64_t        v_pad9[0xB];            /* 180h-1D7h */
                        uint64_t        v_rsp;                  /* 1D8h */
                        uint64_t        v_pad10[0x3];           /* 1E0h-1F7h */
                        uint64_t        v_rax;                  /* 1F8h */
                        uint64_t        v_star;                 /* 200h */
                        uint64_t        v_lstar;                /* 208h */
                        uint64_t        v_cstar;                /* 210h */
                        uint64_t        v_sfmask;               /* 218h */
                        uint64_t        v_kgsbase;              /* 220h */
                        uint64_t        v_sysenter_cs;          /* 228h */
                        uint64_t        v_sysenter_esp;         /* 230h */
                        uint64_t        v_sysenter_eip;         /* 238h */
                        uint64_t        v_cr2;                  /* 240h */
                        uint64_t        v_pad11[0x4];           /* 248h-267h */
                        uint64_t        v_g_pat;                /* 268h */
                        uint64_t        v_dbgctl;               /* 270h */
                        uint64_t        v_br_from;              /* 278h */
                        uint64_t        v_br_to;                /* 280h */
                        uint64_t        v_lastexcpfrom;         /* 288h */
                        uint64_t        v_lastexcpto;           /* 290h */
                };
                uint8_t vmcb_layout[PAGE_SIZE - 0x400];
        };
};

struct vmcs {
        uint32_t        vmcs_revision;
};

struct vmx_invvpid_descriptor
{
        uint64_t        vid_vpid;
        uint64_t        vid_addr;
};

struct vmx_invept_descriptor
{
        uint64_t        vid_eptp;
        uint64_t        vid_reserved;
};

struct vmx_msr_store
{
        uint64_t        vms_index;
        uint64_t        vms_data;
};

/*
 * Storage for guest registers not preserved in VMCS and various exit
 * information.
 *
 * Note that vmx/svm_enter_guest depend on the layout of this struct for
 * field access.
 */
struct vcpu_gueststate
{
        /* %rsi should be first */
        uint64_t        vg_rsi;                 /* 0x00 */
        uint64_t        vg_rax;                 /* 0x08 */
        uint64_t        vg_rbx;                 /* 0x10 */
        uint64_t        vg_rcx;                 /* 0x18 */
        uint64_t        vg_rdx;                 /* 0x20 */
        uint64_t        vg_rdi;                 /* 0x28 */
        uint64_t        vg_rbp;                 /* 0x30 */
        uint64_t        vg_r8;                  /* 0x38 */
        uint64_t        vg_r9;                  /* 0x40 */
        uint64_t        vg_r10;                 /* 0x48 */
        uint64_t        vg_r11;                 /* 0x50 */
        uint64_t        vg_r12;                 /* 0x58 */
        uint64_t        vg_r13;                 /* 0x60 */
        uint64_t        vg_r14;                 /* 0x68 */
        uint64_t        vg_r15;                 /* 0x70 */
        uint64_t        vg_cr2;                 /* 0x78 */
        uint64_t        vg_rip;                 /* 0x80 */
        uint32_t        vg_exit_reason;         /* 0x88 */
        uint64_t        vg_rflags;              /* 0x90 */
        uint64_t        vg_xcr0;                /* 0x98 */
        /*
         * Debug registers
         * - %dr4/%dr5 are aliased to %dr6/%dr7 (or cause #DE)
         * - %dr7 is saved automatically in the VMCS
         */
        uint64_t        vg_dr0;                 /* 0xa0 */
        uint64_t        vg_dr1;                 /* 0xa8 */
        uint64_t        vg_dr2;                 /* 0xb0 */
        uint64_t        vg_dr3;                 /* 0xb8 */
        uint64_t        vg_dr6;                 /* 0xc0 */
};

/*
 * Virtual CPU
 *
 * Methods used to protect vcpu struct members:
 *      a       atomic operations
 *      I       immutable operations
 *      K       kernel lock
 *      r       reference count
 *      v       vcpu rwlock
 *      V       vm struct's vcpu list lock (vm_vcpu_lock)
 */
struct vcpu {
        /*
         * Guest FPU state - this must remain as the first member of the
         * struct to ensure 64-byte alignment (set up during vcpu_pool init)
         */
        struct savefpu  vc_g_fpu;               /* [v] */

        /* VMCS / VMCB pointer */
        vaddr_t         vc_control_va;          /* [I] */
        paddr_t         vc_control_pa;          /* [I] */

        /* VLAPIC pointer */
        vaddr_t         vc_vlapic_va;           /* [I] */
        uint64_t        vc_vlapic_pa;           /* [I] */

        /* MSR bitmap address */
        vaddr_t         vc_msr_bitmap_va;       /* [I] */
        uint64_t        vc_msr_bitmap_pa;       /* [I] */

        struct vm       *vc_parent;             /* [I] */
        uint32_t        vc_id;                  /* [I] */
        uint16_t        vc_vpid;                /* [I] */
        u_int           vc_state;               /* [a] */
        SLIST_ENTRY(vcpu) vc_vcpu_link;         /* [V] */

        uint8_t         vc_virt_mode;           /* [I] */

        struct rwlock   vc_lock;

        struct cpu_info *vc_curcpu;             /* [a] */
        struct cpu_info *vc_last_pcpu;          /* [v] */
        struct vm_exit  vc_exit;                /* [v] */

        uint16_t        vc_intr;                /* [v] */
        uint8_t         vc_irqready;            /* [v] */

        uint8_t         vc_fpuinited;           /* [v] */

        uint64_t        vc_h_xcr0;              /* [v] */

        struct vcpu_gueststate vc_gueststate;   /* [v] */

        struct vcpu_inject_event vc_inject;     /* [v] */

        uint32_t        vc_pvclock_version;     /* [v] */
        paddr_t         vc_pvclock_system_gpa;  /* [v] */
        uint32_t        vc_pvclock_system_tsc_mul;      /* [v] */

        /* Shadowed MSRs */
        uint64_t        vc_shadow_pat;          /* [v] */

        /* Userland Protection Keys */
        uint32_t        vc_pkru;                /* [v] */

        /* VMX only (all requiring [v]) */
        uint64_t        vc_vmx_basic;
        uint64_t        vc_vmx_entry_ctls;
        uint64_t        vc_vmx_true_entry_ctls;
        uint64_t        vc_vmx_exit_ctls;
        uint64_t        vc_vmx_true_exit_ctls;
        uint64_t        vc_vmx_pinbased_ctls;
        uint64_t        vc_vmx_true_pinbased_ctls;
        uint64_t        vc_vmx_procbased_ctls;
        uint64_t        vc_vmx_true_procbased_ctls;
        uint64_t        vc_vmx_procbased2_ctls;
        vaddr_t         vc_vmx_msr_exit_save_va;
        paddr_t         vc_vmx_msr_exit_save_pa;
        vaddr_t         vc_vmx_msr_exit_load_va;
        paddr_t         vc_vmx_msr_exit_load_pa;
#if 0   /* XXX currently use msr_exit_save for msr_entry_load too */
        vaddr_t         vc_vmx_msr_entry_load_va;
        paddr_t         vc_vmx_msr_entry_load_pa;
#endif
        uint8_t         vc_vmx_vpid_enabled;
        uint64_t        vc_vmx_cr0_fixed1;
        uint64_t        vc_vmx_cr0_fixed0;
        uint32_t        vc_vmx_vmcs_state;      /* [a] */
#define VMCS_CLEARED    0
#define VMCS_LAUNCHED   1

        /* SVM only (all requiring [v]) */
        vaddr_t         vc_svm_hsa_va;
        paddr_t         vc_svm_hsa_pa;
        vaddr_t         vc_svm_ioio_va;
        paddr_t         vc_svm_ioio_pa;
};

SLIST_HEAD(vcpu_head, vcpu);

void    vmm_dispatch_intr(vaddr_t);
int     vmxon(uint64_t *);
int     vmxoff(void);
int     vmclear(paddr_t *);
int     vmptrld(paddr_t *);
int     vmptrst(paddr_t *);
int     vmwrite(uint64_t, uint64_t);
int     vmread(uint64_t, uint64_t *);
int     invvpid(uint64_t, struct vmx_invvpid_descriptor *);
int     invept(uint64_t, struct vmx_invept_descriptor *);
int     vmx_enter_guest(paddr_t *, struct vcpu_gueststate *, int, uint8_t);
int     svm_enter_guest(uint64_t, struct vcpu_gueststate *,
            struct region_descriptor *);
void    start_vmm_on_cpu(struct cpu_info *);
void    stop_vmm_on_cpu(struct cpu_info *);
void    vmclear_on_cpu(struct cpu_info *);
void    vmm_attach_machdep(struct device *, struct device *, void *);
void    vmm_activate_machdep(struct device *, int);
int     vmmioctl_machdep(dev_t, u_long, caddr_t, int, struct proc *);
int     pledge_ioctl_vmm_machdep(struct proc *, long);
int     vmm_start(void);
int     vmm_stop(void);
int     vm_impl_init(struct vm *, struct proc *);
void    vm_impl_deinit(struct vm *);
int     vcpu_init(struct vcpu *);
void    vcpu_deinit(struct vcpu *);
int     vm_rwvmparams(struct vm_rwvmparams_params *, int);
int     vm_rwregs(struct vm_rwregs_params *, int);
int     vm_run(struct vm_run_params *);
int     vcpu_reset_regs(struct vcpu *, struct vcpu_reg_state *);

#endif /* _KERNEL */

#endif /* ! _MACHINE_VMMVAR_H_ */