KVM: VMX: Optimize %ds, %es reload
include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * If we support unaligned MMIO, at most one fragment will be split into two:
 */
#ifdef KVM_UNALIGNED_MMIO
# define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
# define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif

#define KVM_USER_MMIO_SIZE 8

#define KVM_MAX_MMIO_FRAGMENTS \
	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

#define KVM_USERSPACE_IRQ_SOURCE_ID 0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

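/*
 * For example (illustrative): an 8-byte guest store crossing a page
 * boundary at gpa 0xffc could be carried as two fragments,
 * {.gpa = 0xffc, .len = 4} and {.gpa = 0x1000, .len = 4}, each one
 * serviced by a separate exit to userspace.
 */
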
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

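/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a kicker can use the cmpxchg above to decide whether a real IPI is
 * needed, closing the race with the vcpu entry path.
 *
 *	void example_kick(struct kvm_vcpu *vcpu)
 *	{
 *		int me = get_cpu();
 *
 *		if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE &&
 *		    vcpu->cpu != me)
 *			smp_send_reschedule(vcpu->cpu);
 *		put_cpu();
 *	}
 */
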
/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *rmap;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

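/*
 * Worked example (illustrative): a 1000-page memslot on a 64-bit host
 * needs ALIGN(1000, 64) / 8 = 1024 / 8 = 128 bytes of dirty bitmap.
 */
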
struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains the list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note: memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
	pr_err_ratelimited("kvm: %i: cpu%i " fmt,			\
			   current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

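/*
 * Usage sketch (illustrative): iterate every online vcpu, e.g. to post
 * a request to each one.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 */
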
#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

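/*
 * Usage sketch (illustrative): walk the used slots, e.g. to total the
 * guest pages; the caller must hold kvm->srcu or kvm->slots_lock so
 * that kvm_memslots() below is safe.
 *
 *	struct kvm_memory_slot *memslot;
 *	unsigned long pages = 0;
 *
 *	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
 *		pages += memslot->npages;
 */
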
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

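/*
 * Typical pattern (illustrative sketch): look up the pfn, check for the
 * error cases above, and drop the reference when done, dirtying it only
 * if the page was written.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;
 *	... access the backing page ...
 *	kvm_release_pfn_dirty(pfn);	// or kvm_release_pfn_clean()
 */
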
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);

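/*
 * Usage sketch (illustrative): the *_cached variants avoid redoing the
 * gpa->hva lookup for a guest location the host touches often.
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *		return -EFAULT;
 *	... later, on every update ...
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */
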
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	account_system_vtime(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from the rcu point of view.
	 * In addition a CPU may stay in guest mode for quite a long time (up
	 * to one time slice). Let's treat guest mode as a quiescent state,
	 * just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}

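/*
 * Illustrative sketch of how an arch run loop brackets guest entry with
 * these helpers (greatly simplified; real arch code does much more):
 *
 *	local_irq_disable();
 *	vcpu->mode = IN_GUEST_MODE;
 *	kvm_guest_enter();
 *	... run the guest until it exits ...
 *	vcpu->mode = OUTSIDE_GUEST_MODE;
 *	kvm_guest_exit();
 *	local_irq_enable();
 */
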
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

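/*
 * Usage sketch (illustrative): combined with gfn_to_hva_memslot() below,
 * this gives the lock-free gfn->hva translation that powerpc's real-mode
 * code relies on.
 *
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *
 *	if (!slot)
 *		return -EFAULT;	// gfn is not backed by any slot
 *	hva = gfn_to_hva_memslot(slot, gfn);
 */
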
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

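/*
 * Worked example (illustrative, x86 numbers): at the 2MB page level
 * KVM_HPAGE_GFN_SHIFT(level) is 9, so within a slot whose base_gfn is
 * 0x800, gfn 0xa00 lands at large-page index (0xa00 >> 9) - (0x800 >> 9) = 1.
 */
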
static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

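/*
 * With 4K pages (PAGE_SHIFT == 12), for example, gfn_to_gpa(0x12345)
 * returns 0x12345000 and gpa_to_gfn(0x12345678) returns 0x12345.
 */
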
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq. This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

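/*
 * Usage sketch (illustrative): the classic fault-path pattern samples
 * the sequence count before the sleepable pfn lookup, then rechecks it
 * under mmu_lock before installing the translation.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// raced with an invalidation
 *	... install the mapping ...
 */
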
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}

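/*
 * Usage sketch (illustrative): requests pair a producer, which may run
 * anywhere, with the consumer on the vcpu entry path.
 *
 *	// producer:
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, before guest entry:
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		... flush this vcpu's TLB before entering the guest ...
 */
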
#endif