1 #ifndef __KVM_HOST_H
2 #define __KVM_HOST_H
3
4 /*
5 * This work is licensed under the terms of the GNU GPL, version 2. See
6 * the COPYING file in the top-level directory.
7 */
8
9 #include <linux/types.h>
10 #include <linux/hardirq.h>
11 #include <linux/list.h>
12 #include <linux/mutex.h>
13 #include <linux/spinlock.h>
14 #include <linux/signal.h>
15 #include <linux/sched.h>
16 #include <linux/bug.h>
17 #include <linux/mm.h>
18 #include <linux/mmu_notifier.h>
19 #include <linux/preempt.h>
20 #include <linux/msi.h>
21 #include <linux/slab.h>
22 #include <linux/rcupdate.h>
23 #include <linux/ratelimit.h>
24 #include <linux/err.h>
25 #include <asm/signal.h>
26
27 #include <linux/kvm.h>
28 #include <linux/kvm_para.h>
29
30 #include <linux/kvm_types.h>
31
32 #include <asm/kvm_host.h>
33
34 #ifndef KVM_MMIO_SIZE
35 #define KVM_MMIO_SIZE 8
36 #endif
37
38 /*
39 * Bits 16 - 31 of kvm_memory_region::flags are used internally by kvm;
40 * the other bits are visible to userspace and are defined in
41 * include/linux/kvm.h.
42 */
43 #define KVM_MEMSLOT_INVALID (1UL << 16)
44
45 /*
46 * If we support unaligned MMIO, at most one fragment will be split into two:
47 */
48 #ifdef KVM_UNALIGNED_MMIO
49 # define KVM_EXTRA_MMIO_FRAGMENTS 1
50 #else
51 # define KVM_EXTRA_MMIO_FRAGMENTS 0
52 #endif
53
54 #define KVM_USER_MMIO_SIZE 8
55
56 #define KVM_MAX_MMIO_FRAGMENTS \
57 (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
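/*
 * Worked example (using the defaults above, which an arch may override):
 * with KVM_MMIO_SIZE = 8 and KVM_USER_MMIO_SIZE = 8 this evaluates to
 * 8 / 8 + 0 = 1 fragment, or 8 / 8 + 1 = 2 fragments when
 * KVM_UNALIGNED_MMIO is defined.
 */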
58
59 /*
60 * For a normal pfn, the highest 12 bits should be zero,
61 * so we can mask bits 52 - 62 to indicate an error pfn and
62 * bit 63 to indicate a noslot pfn.
63 */
64 #define KVM_PFN_ERR_MASK (0x7ffULL << 52)
65 #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
66 #define KVM_PFN_NOSLOT (0x1ULL << 63)
67
68 #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
69 #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
70 #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
71
72 /*
73 * Error pfns indicate that the gfn is in a slot but failed to be
74 * translated to a pfn on the host.
75 */
76 static inline bool is_error_pfn(pfn_t pfn)
77 {
78 return !!(pfn & KVM_PFN_ERR_MASK);
79 }
80
81 /*
82 * error_noslot pfns indicate that the gfn cannot be
83 * translated to a pfn - either it is not in any slot or the
84 * translation to a pfn failed.
85 */
86 static inline bool is_error_noslot_pfn(pfn_t pfn)
87 {
88 return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
89 }
90
91 /* A noslot pfn indicates that the gfn is not in any slot. */
92 static inline bool is_noslot_pfn(pfn_t pfn)
93 {
94 return pfn == KVM_PFN_NOSLOT;
95 }
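/*
 * Illustrative sketch (not part of this header) of how a caller might
 * combine these predicates with gfn_to_pfn(), which is declared further
 * below.  The error handling is an assumption about a typical caller,
 * not a prescription:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -EFAULT;		(gfn is not backed by any memslot)
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		(slot exists, but translation failed)
 *
 *	... use pfn, then kvm_release_pfn_clean(pfn) ...
 */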
96
97 #define KVM_HVA_ERR_BAD (PAGE_OFFSET)
98 #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
99
100 static inline bool kvm_is_error_hva(unsigned long addr)
101 {
102 return addr >= PAGE_OFFSET;
103 }
104
105 #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
106
107 static inline bool is_error_page(struct page *page)
108 {
109 return IS_ERR(page);
110 }
111
112 /*
113 * vcpu->requests bit members
114 */
115 #define KVM_REQ_TLB_FLUSH 0
116 #define KVM_REQ_MIGRATE_TIMER 1
117 #define KVM_REQ_REPORT_TPR_ACCESS 2
118 #define KVM_REQ_MMU_RELOAD 3
119 #define KVM_REQ_TRIPLE_FAULT 4
120 #define KVM_REQ_PENDING_TIMER 5
121 #define KVM_REQ_UNHALT 6
122 #define KVM_REQ_MMU_SYNC 7
123 #define KVM_REQ_CLOCK_UPDATE 8
124 #define KVM_REQ_KICK 9
125 #define KVM_REQ_DEACTIVATE_FPU 10
126 #define KVM_REQ_EVENT 11
127 #define KVM_REQ_APF_HALT 12
128 #define KVM_REQ_STEAL_UPDATE 13
129 #define KVM_REQ_NMI 14
130 #define KVM_REQ_IMMEDIATE_EXIT 15
131 #define KVM_REQ_PMU 16
132 #define KVM_REQ_PMI 17
133 #define KVM_REQ_WATCHDOG 18
134 #define KVM_REQ_MASTERCLOCK_UPDATE 19
135 #define KVM_REQ_MCLOCK_INPROGRESS 20
136
137 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
138 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
139
140 struct kvm;
141 struct kvm_vcpu;
142 extern struct kmem_cache *kvm_vcpu_cache;
143
144 struct kvm_io_range {
145 gpa_t addr;
146 int len;
147 struct kvm_io_device *dev;
148 };
149
150 #define NR_IOBUS_DEVS 1000
151
152 struct kvm_io_bus {
153 int dev_count;
154 struct kvm_io_range range[];
155 };
156
157 enum kvm_bus {
158 KVM_MMIO_BUS,
159 KVM_PIO_BUS,
160 KVM_NR_BUSES
161 };
162
163 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
164 int len, const void *val);
165 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
166 void *val);
167 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
168 int len, struct kvm_io_device *dev);
169 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
170 struct kvm_io_device *dev);
171
172 #ifdef CONFIG_KVM_ASYNC_PF
173 struct kvm_async_pf {
174 struct work_struct work;
175 struct list_head link;
176 struct list_head queue;
177 struct kvm_vcpu *vcpu;
178 struct mm_struct *mm;
179 gva_t gva;
180 unsigned long addr;
181 struct kvm_arch_async_pf arch;
182 struct page *page;
183 bool done;
184 };
185
186 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
187 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
188 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
189 struct kvm_arch_async_pf *arch);
190 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
191 #endif
192
193 enum {
194 OUTSIDE_GUEST_MODE,
195 IN_GUEST_MODE,
196 EXITING_GUEST_MODE,
197 READING_SHADOW_PAGE_TABLES,
198 };
199
200 /*
201 * Sometimes a large or cross-page mmio needs to be broken up into separate
202 * exits for userspace servicing.
203 */
204 struct kvm_mmio_fragment {
205 gpa_t gpa;
206 void *data;
207 unsigned len;
208 };
209
210 struct kvm_vcpu {
211 struct kvm *kvm;
212 #ifdef CONFIG_PREEMPT_NOTIFIERS
213 struct preempt_notifier preempt_notifier;
214 #endif
215 int cpu;
216 int vcpu_id;
217 int srcu_idx;
218 int mode;
219 unsigned long requests;
220 unsigned long guest_debug;
221
222 struct mutex mutex;
223 struct kvm_run *run;
224
225 int fpu_active;
226 int guest_fpu_loaded, guest_xcr0_loaded;
227 wait_queue_head_t wq;
228 struct pid *pid;
229 int sigset_active;
230 sigset_t sigset;
231 struct kvm_vcpu_stat stat;
232
233 #ifdef CONFIG_HAS_IOMEM
234 int mmio_needed;
235 int mmio_read_completed;
236 int mmio_is_write;
237 int mmio_cur_fragment;
238 int mmio_nr_fragments;
239 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
240 #endif
241
242 #ifdef CONFIG_KVM_ASYNC_PF
243 struct {
244 u32 queued;
245 struct list_head queue;
246 struct list_head done;
247 spinlock_t lock;
248 } async_pf;
249 #endif
250
251 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
252 /*
253 * CPU relax intercept or pause loop exit optimization.
254 * in_spin_loop: set when a vcpu does a pause loop exit
255 * or has its cpu relax intercepted.
256 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
257 */
258 struct {
259 bool in_spin_loop;
260 bool dy_eligible;
261 } spin_loop;
262 #endif
263 struct kvm_vcpu_arch arch;
264 };
265
266 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
267 {
268 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
269 }
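/*
 * Sketch of how a kick path might use the return value (illustrative
 * only, not the actual implementation): an IPI is only needed if the
 * vcpu really was in guest mode when we flipped it to EXITING_GUEST_MODE.
 *
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(vcpu->cpu);
 */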
270
271 /*
272 * Some of the bitops functions do not support overly long bitmaps.
273 * This number must be chosen so that such limits are not exceeded.
274 */
275 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
276
277 struct kvm_memory_slot {
278 gfn_t base_gfn;
279 unsigned long npages;
280 unsigned long flags;
281 unsigned long *dirty_bitmap;
282 struct kvm_arch_memory_slot arch;
283 unsigned long userspace_addr;
284 int user_alloc;
285 int id;
286 };
287
288 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
289 {
290 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
291 }
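/*
 * Worked example (assuming 4 KiB pages): a 1 GiB slot has
 * npages = 262144, which is already a multiple of BITS_PER_LONG,
 * so its dirty bitmap occupies 262144 / 8 = 32768 bytes.
 */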
292
293 struct kvm_kernel_irq_routing_entry {
294 u32 gsi;
295 u32 type;
296 int (*set)(struct kvm_kernel_irq_routing_entry *e,
297 struct kvm *kvm, int irq_source_id, int level);
298 union {
299 struct {
300 unsigned irqchip;
301 unsigned pin;
302 } irqchip;
303 struct msi_msg msi;
304 };
305 struct hlist_node link;
306 };
307
308 #ifdef __KVM_HAVE_IOAPIC
309
310 struct kvm_irq_routing_table {
311 int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
312 struct kvm_kernel_irq_routing_entry *rt_entries;
313 u32 nr_rt_entries;
314 /*
315 * Array indexed by gsi. Each entry contains a list of irq chips
316 * the gsi is connected to.
317 */
318 struct hlist_head map[0];
319 };
320
321 #else
322
323 struct kvm_irq_routing_table {};
324
325 #endif
326
327 #ifndef KVM_MEM_SLOTS_NUM
328 #define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
329 #endif
330
331 /*
332 * Note:
333 * memslots are no longer sorted by id; use id_to_memslot()
334 * to look up a memslot by its id.
335 */
336 struct kvm_memslots {
337 u64 generation;
338 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
339 /* The mapping table from slot id to the index in memslots[]. */
340 int id_to_index[KVM_MEM_SLOTS_NUM];
341 };
342
343 struct kvm {
344 spinlock_t mmu_lock;
345 struct mutex slots_lock;
346 struct mm_struct *mm; /* userspace tied to this vm */
347 struct kvm_memslots *memslots;
348 struct srcu_struct srcu;
349 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
350 u32 bsp_vcpu_id;
351 #endif
352 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
353 atomic_t online_vcpus;
354 int last_boosted_vcpu;
355 struct list_head vm_list;
356 struct mutex lock;
357 struct kvm_io_bus *buses[KVM_NR_BUSES];
358 #ifdef CONFIG_HAVE_KVM_EVENTFD
359 struct {
360 spinlock_t lock;
361 struct list_head items;
362 struct list_head resampler_list;
363 struct mutex resampler_lock;
364 } irqfds;
365 struct list_head ioeventfds;
366 #endif
367 struct kvm_vm_stat stat;
368 struct kvm_arch arch;
369 atomic_t users_count;
370 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
371 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
372 spinlock_t ring_lock;
373 struct list_head coalesced_zones;
374 #endif
375
376 struct mutex irq_lock;
377 #ifdef CONFIG_HAVE_KVM_IRQCHIP
378 /*
379 * Update side is protected by irq_lock and,
380 * if configured, irqfds.lock.
381 */
382 struct kvm_irq_routing_table __rcu *irq_routing;
383 struct hlist_head mask_notifier_list;
384 struct hlist_head irq_ack_notifier_list;
385 #endif
386
387 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
388 struct mmu_notifier mmu_notifier;
389 unsigned long mmu_notifier_seq;
390 long mmu_notifier_count;
391 #endif
392 long tlbs_dirty;
393 };
394
395 #define kvm_err(fmt, ...) \
396 pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
397 #define kvm_info(fmt, ...) \
398 pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
399 #define kvm_debug(fmt, ...) \
400 pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
401 #define kvm_pr_unimpl(fmt, ...) \
402 pr_err_ratelimited("kvm [%i]: " fmt, \
403 task_tgid_nr(current), ## __VA_ARGS__)
404
405 /* The guest did something we don't support. */
406 #define vcpu_unimpl(vcpu, fmt, ...) \
407 kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
408
409 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
410 {
411 smp_rmb();
412 return kvm->vcpus[i];
413 }
414
415 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
416 for (idx = 0; \
417 idx < atomic_read(&kvm->online_vcpus) && \
418 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
419 idx++)
420
421 #define kvm_for_each_memslot(memslot, slots) \
422 for (memslot = &slots->memslots[0]; \
423 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
424 memslot++)
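/*
 * Usage sketch for the iteration helpers above (illustrative only):
 * kick every vcpu that is currently online.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */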
425
426 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
427 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
428
429 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
430 void vcpu_put(struct kvm_vcpu *vcpu);
431
432 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
433 struct module *module);
434 void kvm_exit(void);
435
436 void kvm_get_kvm(struct kvm *kvm);
437 void kvm_put_kvm(struct kvm *kvm);
438 void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
439
440 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
441 {
442 return rcu_dereference_check(kvm->memslots,
443 srcu_read_lock_held(&kvm->srcu)
444 || lockdep_is_held(&kvm->slots_lock));
445 }
446
447 static inline struct kvm_memory_slot *
448 id_to_memslot(struct kvm_memslots *slots, int id)
449 {
450 int index = slots->id_to_index[id];
451 struct kvm_memory_slot *slot;
452
453 slot = &slots->memslots[index];
454
455 WARN_ON(slot->id != id);
456 return slot;
457 }
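/*
 * Illustrative access pattern (a sketch, not part of this header):
 * readers hold kvm->srcu across any use of the memslots returned by
 * kvm_memslots().
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *	struct kvm_memory_slot *slot = id_to_memslot(slots, id);
 *	... read *slot ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */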
458
459 int kvm_set_memory_region(struct kvm *kvm,
460 struct kvm_userspace_memory_region *mem,
461 int user_alloc);
462 int __kvm_set_memory_region(struct kvm *kvm,
463 struct kvm_userspace_memory_region *mem,
464 int user_alloc);
465 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
466 struct kvm_memory_slot *dont);
467 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
468 int kvm_arch_prepare_memory_region(struct kvm *kvm,
469 struct kvm_memory_slot *memslot,
470 struct kvm_memory_slot old,
471 struct kvm_userspace_memory_region *mem,
472 int user_alloc);
473 void kvm_arch_commit_memory_region(struct kvm *kvm,
474 struct kvm_userspace_memory_region *mem,
475 struct kvm_memory_slot old,
476 int user_alloc);
477 bool kvm_largepages_enabled(void);
478 void kvm_disable_largepages(void);
479 /* flush all memory translations */
480 void kvm_arch_flush_shadow_all(struct kvm *kvm);
481 /* flush memory translations pointing to 'slot' */
482 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
483 struct kvm_memory_slot *slot);
484
485 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
486 int nr_pages);
487
488 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
489 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
490 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
491 void kvm_release_page_clean(struct page *page);
492 void kvm_release_page_dirty(struct page *page);
493 void kvm_set_page_dirty(struct page *page);
494 void kvm_set_page_accessed(struct page *page);
495
496 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
497 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
498 bool write_fault, bool *writable);
499 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
500 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
501 bool *writable);
502 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
503 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
504
505 void kvm_release_pfn_dirty(pfn_t pfn);
506 void kvm_release_pfn_clean(pfn_t pfn);
507 void kvm_set_pfn_dirty(pfn_t pfn);
508 void kvm_set_pfn_accessed(pfn_t pfn);
509 void kvm_get_pfn(pfn_t pfn);
510
511 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
512 int len);
513 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
514 unsigned long len);
515 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
516 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
517 void *data, unsigned long len);
518 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
519 int offset, int len);
520 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
521 unsigned long len);
522 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
523 void *data, unsigned long len);
524 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
525 gpa_t gpa);
526 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
527 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
528 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
529 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
530 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
531 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
532 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
533 gfn_t gfn);
534
535 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
536 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
537 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
538 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
539 void kvm_resched(struct kvm_vcpu *vcpu);
540 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
541 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
542
543 void kvm_flush_remote_tlbs(struct kvm *kvm);
544 void kvm_reload_remote_mmus(struct kvm *kvm);
545 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
546
547 long kvm_arch_dev_ioctl(struct file *filp,
548 unsigned int ioctl, unsigned long arg);
549 long kvm_arch_vcpu_ioctl(struct file *filp,
550 unsigned int ioctl, unsigned long arg);
551 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
552
553 int kvm_dev_ioctl_check_extension(long ext);
554
555 int kvm_get_dirty_log(struct kvm *kvm,
556 struct kvm_dirty_log *log, int *is_dirty);
557 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
558 struct kvm_dirty_log *log);
559
560 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
561 struct
562 kvm_userspace_memory_region *mem,
563 int user_alloc);
564 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
565 long kvm_arch_vm_ioctl(struct file *filp,
566 unsigned int ioctl, unsigned long arg);
567
568 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
569 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
570
571 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
572 struct kvm_translation *tr);
573
574 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
575 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
576 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
577 struct kvm_sregs *sregs);
578 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
579 struct kvm_sregs *sregs);
580 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
581 struct kvm_mp_state *mp_state);
582 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
583 struct kvm_mp_state *mp_state);
584 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
585 struct kvm_guest_debug *dbg);
586 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
587
588 int kvm_arch_init(void *opaque);
589 void kvm_arch_exit(void);
590
591 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
592 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
593
594 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
595 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
596 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
597 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
598 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
599 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
600 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
601
602 int kvm_arch_hardware_enable(void *garbage);
603 void kvm_arch_hardware_disable(void *garbage);
604 int kvm_arch_hardware_setup(void);
605 void kvm_arch_hardware_unsetup(void);
606 void kvm_arch_check_processor_compat(void *rtn);
607 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
608 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
609
610 void kvm_free_physmem(struct kvm *kvm);
611
612 void *kvm_kvzalloc(unsigned long size);
613 void kvm_kvfree(const void *addr);
614
615 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
616 static inline struct kvm *kvm_arch_alloc_vm(void)
617 {
618 return kzalloc(sizeof(struct kvm), GFP_KERNEL);
619 }
620
621 static inline void kvm_arch_free_vm(struct kvm *kvm)
622 {
623 kfree(kvm);
624 }
625 #endif
626
627 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
628 {
629 #ifdef __KVM_HAVE_ARCH_WQP
630 return vcpu->arch.wqp;
631 #else
632 return &vcpu->wq;
633 #endif
634 }
635
636 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
637 void kvm_arch_destroy_vm(struct kvm *kvm);
638 void kvm_free_all_assigned_devices(struct kvm *kvm);
639 void kvm_arch_sync_events(struct kvm *kvm);
640
641 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
642 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
643
644 bool kvm_is_mmio_pfn(pfn_t pfn);
645
646 struct kvm_irq_ack_notifier {
647 struct hlist_node link;
648 unsigned gsi;
649 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
650 };
651
652 struct kvm_assigned_dev_kernel {
653 struct kvm_irq_ack_notifier ack_notifier;
654 struct list_head list;
655 int assigned_dev_id;
656 int host_segnr;
657 int host_busnr;
658 int host_devfn;
659 unsigned int entries_nr;
660 int host_irq;
661 bool host_irq_disabled;
662 bool pci_2_3;
663 struct msix_entry *host_msix_entries;
664 int guest_irq;
665 struct msix_entry *guest_msix_entries;
666 unsigned long irq_requested_type;
667 int irq_source_id;
668 int flags;
669 struct pci_dev *dev;
670 struct kvm *kvm;
671 spinlock_t intx_lock;
672 spinlock_t intx_mask_lock;
673 char irq_name[32];
674 struct pci_saved_state *pci_saved_state;
675 };
676
677 struct kvm_irq_mask_notifier {
678 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
679 int irq;
680 struct hlist_node link;
681 };
682
683 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
684 struct kvm_irq_mask_notifier *kimn);
685 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
686 struct kvm_irq_mask_notifier *kimn);
687 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
688 bool mask);
689
690 #ifdef __KVM_HAVE_IOAPIC
691 void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
692 union kvm_ioapic_redirect_entry *entry,
693 unsigned long *deliver_bitmask);
694 #endif
695 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
696 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
697 int irq_source_id, int level);
698 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
699 void kvm_register_irq_ack_notifier(struct kvm *kvm,
700 struct kvm_irq_ack_notifier *kian);
701 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
702 struct kvm_irq_ack_notifier *kian);
703 int kvm_request_irq_source_id(struct kvm *kvm);
704 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
705
706 /* For vcpu->arch.iommu_flags */
707 #define KVM_IOMMU_CACHE_COHERENCY 0x1
708
709 #ifdef CONFIG_IOMMU_API
710 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
711 void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
712 int kvm_iommu_map_guest(struct kvm *kvm);
713 int kvm_iommu_unmap_guest(struct kvm *kvm);
714 int kvm_assign_device(struct kvm *kvm,
715 struct kvm_assigned_dev_kernel *assigned_dev);
716 int kvm_deassign_device(struct kvm *kvm,
717 struct kvm_assigned_dev_kernel *assigned_dev);
718 #else /* CONFIG_IOMMU_API */
719 static inline int kvm_iommu_map_pages(struct kvm *kvm,
720 struct kvm_memory_slot *slot)
721 {
722 return 0;
723 }
724
725 static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
726 struct kvm_memory_slot *slot)
727 {
728 }
729
730 static inline int kvm_iommu_map_guest(struct kvm *kvm)
731 {
732 return -ENODEV;
733 }
734
735 static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
736 {
737 return 0;
738 }
739
740 static inline int kvm_assign_device(struct kvm *kvm,
741 struct kvm_assigned_dev_kernel *assigned_dev)
742 {
743 return 0;
744 }
745
746 static inline int kvm_deassign_device(struct kvm *kvm,
747 struct kvm_assigned_dev_kernel *assigned_dev)
748 {
749 return 0;
750 }
751 #endif /* CONFIG_IOMMU_API */
752
753 static inline void kvm_guest_enter(void)
754 {
755 BUG_ON(preemptible());
756 vtime_account(current);
757 current->flags |= PF_VCPU;
758 /* KVM does not hold any references to rcu protected data when it
759 * switches the CPU into guest mode. In fact, switching to guest mode
760 * is very similar to exiting to userspace from an rcu point of view. In
761 * addition, the CPU may stay in guest mode for quite a long time (up to
762 * one time slice). Let's treat guest mode as a quiescent state, just like
763 * we do with user-mode execution.
764 */
765 rcu_virt_note_context_switch(smp_processor_id());
766 }
767
768 static inline void kvm_guest_exit(void)
769 {
770 vtime_account(current);
771 current->flags &= ~PF_VCPU;
772 }
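/*
 * Typical bracketing in an arch run loop (a sketch only; the exact
 * placement is arch specific):
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... enter and run the guest ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */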
773
774 /*
775 * search_memslots() and __gfn_to_memslot() are here because they are
776 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
777 * gfn_to_memslot() itself isn't here as an inline because that would
778 * bloat other code too much.
779 */
780 static inline struct kvm_memory_slot *
781 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
782 {
783 struct kvm_memory_slot *memslot;
784
785 kvm_for_each_memslot(memslot, slots)
786 if (gfn >= memslot->base_gfn &&
787 gfn < memslot->base_gfn + memslot->npages)
788 return memslot;
789
790 return NULL;
791 }
792
793 static inline struct kvm_memory_slot *
794 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
795 {
796 return search_memslots(slots, gfn);
797 }
798
799 static inline unsigned long
800 __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
801 {
802 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
803 }
804
805 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
806 {
807 return gfn_to_memslot(kvm, gfn)->id;
808 }
809
810 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
811 {
812 /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
813 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
814 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
815 }
816
817 static inline gfn_t
818 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
819 {
820 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
821
822 return slot->base_gfn + gfn_offset;
823 }
824
825 static inline gpa_t gfn_to_gpa(gfn_t gfn)
826 {
827 return (gpa_t)gfn << PAGE_SHIFT;
828 }
829
830 static inline gfn_t gpa_to_gfn(gpa_t gpa)
831 {
832 return (gfn_t)(gpa >> PAGE_SHIFT);
833 }
834
835 static inline hpa_t pfn_to_hpa(pfn_t pfn)
836 {
837 return (hpa_t)pfn << PAGE_SHIFT;
838 }
839
840 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
841 {
842 set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
843 }
844
845 enum kvm_stat_kind {
846 KVM_STAT_VM,
847 KVM_STAT_VCPU,
848 };
849
850 struct kvm_stats_debugfs_item {
851 const char *name;
852 int offset;
853 enum kvm_stat_kind kind;
854 struct dentry *dentry;
855 };
856 extern struct kvm_stats_debugfs_item debugfs_entries[];
857 extern struct dentry *kvm_debugfs_dir;
858
859 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
860 static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
861 {
862 if (unlikely(kvm->mmu_notifier_count))
863 return 1;
864 /*
865 * Ensure the read of mmu_notifier_count happens before the read
866 * of mmu_notifier_seq. This interacts with the smp_wmb() in
867 * mmu_notifier_invalidate_range_end to make sure that the caller
868 * either sees the old (non-zero) value of mmu_notifier_count or
869 * the new (incremented) value of mmu_notifier_seq.
870 * PowerPC Book3s HV KVM calls this under a per-page lock
871 * rather than under kvm->mmu_lock, for scalability, so
872 * can't rely on kvm->mmu_lock to keep things ordered.
873 */
874 smp_rmb();
875 if (kvm->mmu_notifier_seq != mmu_seq)
876 return 1;
877 return 0;
878 }
879 #endif
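/*
 * Illustrative use of the sequence check in a page-fault path (a sketch;
 * the real callers live in arch code).  The pfn is looked up outside
 * mmu_lock, then the retry check decides whether it is still safe to use:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	(an invalidation raced with us; retry the fault)
 *	... install the translation ...
 *	spin_unlock(&kvm->mmu_lock);
 */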
880
881 #ifdef KVM_CAP_IRQ_ROUTING
882
883 #define KVM_MAX_IRQ_ROUTES 1024
884
885 int kvm_setup_default_irq_routing(struct kvm *kvm);
886 int kvm_set_irq_routing(struct kvm *kvm,
887 const struct kvm_irq_routing_entry *entries,
888 unsigned nr,
889 unsigned flags);
890 void kvm_free_irq_routing(struct kvm *kvm);
891
892 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
893
894 #else
895
896 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
897
898 #endif
899
900 #ifdef CONFIG_HAVE_KVM_EVENTFD
901
902 void kvm_eventfd_init(struct kvm *kvm);
903 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
904 void kvm_irqfd_release(struct kvm *kvm);
905 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
906 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
907
908 #else
909
910 static inline void kvm_eventfd_init(struct kvm *kvm) {}
911
912 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
913 {
914 return -EINVAL;
915 }
916
917 static inline void kvm_irqfd_release(struct kvm *kvm) {}
918
919 #ifdef CONFIG_HAVE_KVM_IRQCHIP
920 static inline void kvm_irq_routing_update(struct kvm *kvm,
921 struct kvm_irq_routing_table *irq_rt)
922 {
923 rcu_assign_pointer(kvm->irq_routing, irq_rt);
924 }
925 #endif
926
927 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
928 {
929 return -ENOSYS;
930 }
931
932 #endif /* CONFIG_HAVE_KVM_EVENTFD */
933
934 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
935 static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
936 {
937 return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
938 }
939
940 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
941
942 #else
943
944 static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
945
946 #endif
947
948 #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
949
950 long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
951 unsigned long arg);
952
953 #else
954
955 static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
956 unsigned long arg)
957 {
958 return -ENOTTY;
959 }
960
961 #endif
962
963 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
964 {
965 set_bit(req, &vcpu->requests);
966 }
967
968 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
969 {
970 if (test_bit(req, &vcpu->requests)) {
971 clear_bit(req, &vcpu->requests);
972 return true;
973 } else {
974 return false;
975 }
976 }
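/*
 * Typical pairing (a sketch, not taken from any particular arch): one
 * side raises a request and kicks the vcpu; the vcpu run loop consumes
 * the request before re-entering the guest.
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	... and in the vcpu run loop:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		... perform the arch-specific TLB flush ...
 */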
977
978 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
979
980 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
981 {
982 vcpu->spin_loop.in_spin_loop = val;
983 }
984 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
985 {
986 vcpu->spin_loop.dy_eligible = val;
987 }
988
989 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
990
991 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
992 {
993 }
994
995 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
996 {
997 }
998
999 static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
1000 {
1001 return true;
1002 }
1003
1004 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
1005 #endif
1006