#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * Bits 16 - 31 of kvm_memory_region::flags are used internally by kvm;
 * the remaining bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62:52 to indicate an error pfn and
 * bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but failed to be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * it is not in any slot, or it failed to translate.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
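
/*
 * Illustrative sketch (not part of the API): a caller that resolves a gfn
 * and wants to distinguish the failure classes above might look like this,
 * where "gfn" is a hypothetical input:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -ENOENT;	(gfn not backed by any memslot)
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;	(in a slot, but translation failed)
 *
 * is_error_noslot_pfn() would catch both cases at once.
 */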

/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif
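
/*
 * Illustrative sketch (not part of the API): callers typically validate a
 * translated hva before passing it to uaccess helpers, e.g.:
 *
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 */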

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24
#define KVM_REQ_APIC_PAGE_RELOAD  25
#define KVM_REQ_SMI               26
#define KVM_REQ_HV_CRASH          27
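
/*
 * Requests are posted and consumed with the usual bitops on
 * vcpu->requests; see kvm_migrate_timers() below for an in-tree example.
 * A hypothetical sketch:
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);	(post a request)
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		...				(service it in the run loop)
 *
 * The kvm_make_request()/kvm_check_request() helpers defined elsewhere in
 * this header wrap these bitops and should normally be preferred.
 */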

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept / pause loop exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or has its cpu relax instruction intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
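
/*
 * The cmpxchg() above returns the old mode, so a kicker can tell whether
 * the vcpu was actually running in guest mode and only then send an IPI.
 * A hedged sketch of such a caller (cf. kvm_vcpu_kick()):
 *
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(vcpu->cpu);
 */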

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
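
/*
 * Worked example (hypothetical values): with BITS_PER_LONG == 64 and
 * npages == 100, the bitmap is padded to ALIGN(100, 64) == 128 bits,
 * i.e. 16 bytes.
 */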

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
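
/*
 * Illustrative usage (not part of the API): kicking every online vcpu.
 * The iteration stops at the first NULL slot below online_vcpus:
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */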

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
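
/*
 * Illustrative usage (not part of the API): looking up a slot by id, with
 * kvm->srcu read-held or slots_lock held so the memslots stay valid;
 * "id" is a hypothetical input:
 *
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *	struct kvm_memory_slot *slot = id_to_memslot(slots, id);
 */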

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
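
/*
 * Illustrative sketch (not part of the API): the usual lifetime of a pfn
 * obtained above is translate, check, use, then drop the reference,
 * marking the pfn dirty first if the page was written:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn))
 *		return -EFAULT;
 *	...				(access the page)
 *	kvm_set_pfn_dirty(pfn);		(only if it was written)
 *	kvm_release_pfn_clean(pfn);
 */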

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
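
/*
 * Illustrative sketch (not part of the API): the *_cached variants avoid
 * repeated gfn->hva translation for a fixed guest buffer. "ghc" and "val"
 * are hypothetical:
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */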
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/* KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching into guest
	 * mode is very similar to exiting to userspace from RCU's point of
	 * view. In addition, the CPU may stay in guest mode for quite a
	 * long time (up to one time slice). Let's treat guest mode as a
	 * quiescent state, just like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}
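
/*
 * Illustrative sketch (not part of the API): arch run loops bracket the
 * actual guest entry with these helpers, with interrupts disabled across
 * the inner pair:
 *
 *	local_irq_disable();
 *	__kvm_guest_enter();
 *	...			(arch-specific world switch into the guest)
 *	__kvm_guest_exit();
 *	local_irq_enable();
 */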

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
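
/*
 * Note: the binary search above relies on memslots[] being kept sorted by
 * base_gfn in descending order (the memslot update path in
 * virt/kvm/kvm_main.c maintains this), with the lru_slot cache tried
 * first. A hypothetical caller sketch:
 *
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *
 *	if (!slot)
 *		return -ENOENT;		(gfn not covered by any slot)
 */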

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
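
/*
 * Worked example (hypothetical values): with 4K pages, a slot with
 * base_gfn == 0x100 and userspace_addr == 0x7f0000000000 maps gfn 0x102
 * to hva 0x7f0000000000 + (0x102 - 0x100) * 4096 == 0x7f0000002000.
 */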

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
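
/*
 * Worked example (hypothetical values): with 4K pages, PAGE_SHIFT == 12,
 * so gfn_to_gpa(0x1234) == 0x1234000 and gpa_to_gfn(0x1234fff) == 0x1234;
 * pfn_to_hpa() performs the same shift on the host side.
 */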

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
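
/*
 * Illustrative sketch (not part of the API): page-fault paths snapshot
 * mmu_notifier_seq before translating, then recheck under mmu_lock and
 * redo the fault if an invalidation raced with them:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep; done before mmu_lock)
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;		(invalidation raced; redo the fault)
 *	...				(install the translation)
 *	spin_unlock(&kvm->mmu_lock);
 */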