KVM: Introduce kvm_vcpu_is_bsp() function.
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/marker.h>
#include <linux/msi.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_KVMCLOCK_UPDATE    8
#define KVM_REQ_KICK               9

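/*
 * Illustrative use of the request bits (not part of this header): a request
 * is raised with set_bit() on vcpu->requests and consumed with
 * test_and_clear_bit() in the arch vcpu-run loop, for example:
 *
 *      set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *      ...
 *      if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *              ... arch-specific TLB flush for this vcpu ...
 */
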
#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int dev_count;
#define NR_IOBUS_DEVS 6
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
                                          gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                             struct kvm_io_device *dev);
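
/*
 * Sketch of the intended use (illustrative, not part of this header): an
 * in-kernel device registers itself on one of the buses and the MMIO/PIO
 * exit path looks it up by guest address, e.g.:
 *
 *      kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->io_dev);
 *      ...
 *      struct kvm_io_device *d =
 *              kvm_io_bus_find_dev(&kvm->mmio_bus, addr, len, is_write);
 */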

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int vcpu_id;
        struct mutex mutex;
        int cpu;
        struct kvm_run *run;
        unsigned long requests;
        unsigned long guest_debug;
        int fpu_active;
        int guest_fpu_loaded;
        wait_queue_head_t wq;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
#endif

        struct kvm_vcpu_arch arch;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        struct {
                unsigned long rmap_pde;
                int write_count;
        } *lpage_info;
        unsigned long userspace_addr;
        int user_alloc;
};

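/*
 * Each entry on the kvm->irq_routing list maps a GSI to a destination;
 * ->set() delivers 'level' either to an irqchip pin (PIC/IOAPIC) or as an
 * MSI message, depending on 'type'.
 */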
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct list_head link;
};

struct kvm {
        spinlock_t mmu_lock;
        spinlock_t requests_lock;
        struct rw_semaphore slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
        struct kvm_vcpu *bsp_vcpu;
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus mmio_bus;
        struct kvm_io_bus pio_bus;
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
        } irqfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
        struct hlist_head mask_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

#define KVM_ASSIGNED_MSIX_PENDING 0x1
struct kvm_guest_msix_entry {
        u32 vector;
        u16 entry;
        u16 flags;
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct work_struct interrupt_work;
        struct list_head list;
        int assigned_dev_id;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct kvm_guest_msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t assigned_dev_lock;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
                        unsigned long npages);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      gfn_t base_gfn,
                                      unsigned long npages)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                                    struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

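/*
 * Bracket guest-mode execution: PF_VCPU lets the time accounting code
 * attribute the CPU time spent between these two calls to guest time
 * rather than ordinary system time.
 */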
static inline void kvm_guest_enter(void)
{
        account_system_vtime(current);
        current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 5, d1, d2, d3, d4, d5)
#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 4, d1, d2, d3, d4, 0)
#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 3, d1, d2, d3, 0, 0)
#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 2, d1, d2, 0, 0, 0)
#define KVMTRACE_1D(evt, vcpu, d1, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 1, d1, 0, 0, 0, 0)
#define KVMTRACE_0D(evt, vcpu, name) \
	trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
						vcpu, 0, 0, 0, 0, 0, 0)

#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
#else
static inline
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}
#define kvm_trace_cleanup() ((void)0)
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under the mmu_lock and both values are
         * modified under mmu_lock, so there's no need for smp_rmb()
         * in between; otherwise mmu_notifier_count would have to be
         * read before mmu_notifier_seq, see the
         * mmu_notifier_invalidate_range_end() write side.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
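
/*
 * Typical caller pattern (illustrative, not mandated by this header):
 * sample mmu_notifier_seq, do the gfn->pfn translation outside mmu_lock,
 * then retake mmu_lock and retry if an invalidation ran in between:
 *
 *      mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *      spin_lock(&vcpu->kvm->mmu_lock);
 *      if (mmu_notifier_retry(vcpu, mmu_seq))
 *              goto out_unlock;
 */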

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_irqfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);

#else

static inline void kvm_irqfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

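/*
 * True if @vcpu is the VM's bootstrap processor (BSP): the vcpu recorded
 * in kvm->bsp_vcpu, typically the one that comes out of reset runnable
 * while the remaining vcpus wait to be started (e.g. via INIT/SIPI on x86).
 */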
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu == vcpu;
}
#endif