/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because its emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32  kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

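/*
 * Restore the host values of any shared MSRs the guest has modified;
 * invoked from the user-return notifier when this CPU returns to
 * userspace.
 */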
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

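/* Cache the host's current value of @msr in this CPU's shared_msrs slot. */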
static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* only read, and nobody should modify it at this time,
	 * so don't need lock */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before it is used */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

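/*
 * Write a guest value into a shared MSR and arm the user-return notifier
 * so kvm_on_user_return() can restore the host value later.
 */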
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

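/* Restore host MSR values on this CPU without waiting for a return to userspace. */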
static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

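/*
 * Queue an exception for injection into the guest.  If another exception
 * is already pending, apply the SDM's contributory-exception rules:
 * promote to #DF, or request a triple-fault shutdown if #DF was pending.
 */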
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in a hope
		   that instruction re-execution will regenerate lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest. The difference to kvm_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

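/* Check whether the cached PAE PDPTEs still match the ones in guest memory. */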
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	vcpu->arch.xcr0 = xcr0;
	vcpu->guest_xcr0_loaded = 0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;
	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

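/* Load the currently active DR7 value (guest's or host debugger's) into hardware. */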
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}

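/* Emulate a debug-register write; returns 1 to request #UD, -1 for #GP. */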
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

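/* Emulate RDPMC: read the performance counter selected by ECX into EDX:EAX. */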
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	10
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static const u32 emulated_msrs[] = {
	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
};

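/* Validate a guest write to MSR_EFER and hand the new value to the vendor code. */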
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
       efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->set_msr(vcpu, msr);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
		cycle_t	cycle_last;
		cycle_t	mask;
		u32	mult;
		u32	shift;
	} clock;

	/* open coded 'struct timespec' */
	u64		monotonic_time_snsec;
	time_t		monotonic_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

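/* Mirror the timekeeper's clocksource and monotonic time into pvclock_gtod_data. */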
static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->clock->cycle_last;
	vdata->clock.mask		= tk->clock->mask;
	vdata->clock.mult		= tk->mult;
	vdata->clock.shift		= tk->shift;

	vdata->monotonic_time_sec	= tk->xtime_sec
					+ tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec	= tk->xtime_nsec
					+ (tk->wall_to_monotonic.tv_nsec
						<< tk->shift);
	while (vdata->monotonic_time_snsec >=
					(((u64)NSEC_PER_SEC) << tk->shift)) {
		vdata->monotonic_time_snsec -=
					((u64)NSEC_PER_SEC) << tk->shift;
		vdata->monotonic_time_sec++;
	}

	write_seqcount_end(&vdata->seq);
}
#endif


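/*
 * Publish the host's boot-based wall clock time to the guest at @wall_clock,
 * using the pvclock version protocol (odd version = update in progress).
 */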
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
		boot = timespec_sub(boot, ts);
	}
	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}