/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>
#include <asm/kexec.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
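
/*
 * Both wrappers rely on the kvm fault-on-reboot fixup: if the wrapped VMX
 * instruction faults because VMX was already disabled by an emergency
 * reboot, the fault is swallowed instead of crashing the host. __ex_clear
 * additionally zeroes the named register in the fixup path, so a faulting
 * VMREAD hands its caller 0 rather than stale data.
 */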

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv;
module_param(enable_apicv, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
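/* For example, "modprobe kvm-intel nested=1" enables nested VMX. */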

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
 * These two parameters configure the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop; also indicates whether PLE is
 *             enabled. In testing, this time is usually below 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to
 *             execute in a PAUSE loop. Tests indicate that most spinlocks
 *             are held for less than 2^12 cycles.
 * Time is measured on a counter that runs at the same rate as the TSC;
 * see SDM volume 3B, sections 21.6.13 and 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
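
/*
 * Both knobs are read-only at runtime (S_IRUGO): they are visible under
 * /sys/module/kvm_intel/parameters/ and can only be changed by reloading
 * the module, e.g. "modprobe kvm-intel ple_gap=0" to disable PLE.
 */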

extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 vmx_preemption_timer_value;
	u32 padding32[7]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) is used by the
 * current implementation, 4K is reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000

/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
};

#define POSTED_INTR_ON  0
/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	u32 control;	/* bit 0 of control is outstanding notification bit */
	u32 rsvd[7];
} __aligned(64);

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	unsigned long         host_rsp;
	u8                    fail;
	u8                    cpl;
	bool                  nmi_known_unmasked;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	unsigned long	      host_idt_base;
#ifdef CONFIG_X86_64
	u64 		      msr_host_kernel_gs_base;
	u64 		      msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;
	bool                  __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16           ds_sel, es_sel;
#endif
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4
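
/*
 * For example, FIELD64(IO_BITMAP_A, io_bitmap_a) fills two table slots:
 * the IO_BITMAP_A encoding maps to offsetof(struct vmcs12, io_bitmap_a),
 * and IO_BITMAP_A_HIGH maps to the upper four bytes of the same u64.
 */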

static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;
	return vmcs_field_to_offset_table[field];
}
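
/*
 * E.g. vmcs_field_to_offset(GUEST_CS_SELECTOR) yields
 * offsetof(struct vmcs12, guest_cs_selector); encodings outside the table,
 * or unimplemented ones whose slot is still 0, yield -1 so the VMREAD and
 * VMWRITE emulation can reject the access.
 */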

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return NULL;

	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,		   	\
		.limit = GUEST_##seg##_LIMIT,		   	\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
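
/*
 * The table is indexed by VCPU_SREG_*, so e.g.
 * kvm_vmx_segment_fields[VCPU_SREG_CS].base is GUEST_CS_BASE; the segment
 * accessors use it to pick the right VMCS field encodings.
 */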

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}

static inline bool cpu_has_vmx_posted_intr(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}

static inline bool cpu_has_vmx_apicv(void)
{
	return cpu_has_vmx_apic_register_virt() &&
		cpu_has_vmx_virtual_intr_delivery() &&
		cpu_has_vmx_posted_intr();
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}
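
/*
 * A secondary execution control takes effect only when L1 also set the
 * activate-secondary-controls bit in the primary controls, which is why
 * nested_cpu_has2() checks both fields.
 */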

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
	struct kvm_vcpu *vcpu)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}
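
/*
 * Note that guest_msrs[i].index is an index into vmx_msr_index[], not an
 * MSR number, hence the double lookup above.
 */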

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
    struct {
	u64 vpid : 16;
	u64 rsvd : 48;
	u64 gva;
    } operand = { vpid, 0, gva };

    asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}
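
/*
 * The 128-bit INVVPID descriptor above carries the VPID in bits 15:0 and a
 * linear address in the second u64; "ext" selects the invalidation type,
 * e.g. VMX_VPID_EXTENT_SINGLE_CONTEXT or VMX_VPID_EXTENT_ALL_CONTEXT.
 */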

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}
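
/*
 * Same pattern for INVEPT: the descriptor carries the EPT pointer and a
 * guest-physical address (ignored by the context and global invalidation
 * types); "ext" picks e.g. VMX_EPT_EXTENT_CONTEXT or VMX_EPT_EXTENT_GLOBAL.
 */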

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}