KVM: x86: remove code for lazy FPU handling
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2f64e5d0ae534024904b101423409d4409c17810..c48404017e4fb91cf6df49b3b74cd8953446a201 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1811,7 +1811,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct pvclock_vcpu_time_info guest_hv_clock;
 
-       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+       if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
                &guest_hv_clock, sizeof(guest_hv_clock))))
                return;
 
@@ -1832,9 +1832,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
        BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
        vcpu->hv_clock.version = guest_hv_clock.version + 1;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock.version));
 
        smp_wmb();
 
@@ -1848,16 +1848,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
        trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock));
 
        smp_wmb();
 
        vcpu->hv_clock.version++;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+                                   &vcpu->hv_clock,
+                                   sizeof(vcpu->hv_clock.version));
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
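
The conversions in this hunk (and in the steal-time, async-PF, and MSR hunks below) replace the per-VM guest cache helpers with per-vCPU variants that take the vCPU rather than the VM as their first argument. A sketch of the assumed declarations, for orientation only; the authoritative prototypes live in include/linux/kvm_host.h in this tree and may differ in detail:

    /* Assumed per-vCPU gfn->hva cache API used by the call sites in this
     * file; the older kvm_*_guest_cached() helpers took a struct kvm *.
     */
    int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu,
                                       struct gfn_to_hva_cache *ghc,
                                       gpa_t gpa, unsigned long len);
    int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu,
                                   struct gfn_to_hva_cache *ghc,
                                   void *data, unsigned long len);
    int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu,
                                    struct gfn_to_hva_cache *ghc,
                                    void *data, unsigned long len);
    int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu,
                                           struct gfn_to_hva_cache *ghc,
                                           void *data, int offset,
                                           unsigned long len);
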
@@ -2090,7 +2090,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
                return 0;
        }
 
-       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+       if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
                                        sizeof(u32)))
                return 1;
 
@@ -2109,7 +2109,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
-       if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
                return;
 
@@ -2120,7 +2120,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
        vcpu->arch.st.steal.version += 1;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
        smp_wmb();
@@ -2129,14 +2129,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
                vcpu->arch.st.last_steal;
        vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
        smp_wmb();
 
        vcpu->arch.st.steal.version += 1;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2241,7 +2241,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!(data & 1))
                        break;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
                        vcpu->arch.pv_time_enabled = false;
@@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (data & KVM_STEAL_RESERVED_MASK)
                        return 1;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+               if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
                                                data & KVM_STEAL_VALID_BITS,
                                                sizeof(struct kvm_steal_time)))
                        return 1;
@@ -2672,6 +2672,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_DISABLE_QUIRKS:
        case KVM_CAP_SET_BOOT_CPU_ID:
        case KVM_CAP_SPLIT_IRQCHIP:
+       case KVM_CAP_IMMEDIATE_EXIT:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_PCI_2_3:
@@ -2875,7 +2876,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
        vcpu->arch.st.steal.preempted = 1;
 
-       kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
                        &vcpu->arch.st.steal.preempted,
                        offsetof(struct kvm_steal_time, preempted),
                        sizeof(vcpu->arch.st.steal.preempted));
@@ -2909,7 +2910,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       if (vcpu->arch.apicv_active)
+       if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
                kvm_x86_ops->sync_pir_to_irr(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
@@ -6659,7 +6660,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
        else {
-               if (vcpu->arch.apicv_active)
+               if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
                        kvm_x86_ops->sync_pir_to_irr(vcpu);
                kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
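
Both this hunk and the kvm_vcpu_ioctl_get_lapic hunk above (and the guest-entry path further down) now guard the call with the same two conditions: the sync_pir_to_irr hook must exist and APIC virtualization must be active on this vCPU. Purely as an illustration of that pattern, not something the patch adds, the guard could be captured in one helper:

    /* Hypothetical helper, not part of the patch: the hook may be NULL
     * depending on the vendor module, so check it before calling, and only
     * bother when APICv is active for this vCPU.
     */
    static inline void kvm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
    {
            if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
                    kvm_x86_ops->sync_pir_to_irr(vcpu);
    }
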
@@ -6750,10 +6751,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        r = 0;
                        goto out;
                }
-               if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
-                       vcpu->fpu_active = 0;
-                       kvm_x86_ops->fpu_deactivate(vcpu);
-               }
                if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
                        /* Page is swapped out. Do synthetic halt */
                        vcpu->arch.apf.halted = true;
@@ -6813,20 +6810,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_hv_process_stimers(vcpu);
        }
 
-       /*
-        * KVM_REQ_EVENT is not set when posted interrupts are set by
-        * VT-d hardware, so we have to update RVI unconditionally.
-        */
-       if (kvm_lapic_enabled(vcpu)) {
-               /*
-                * Update architecture specific hints for APIC
-                * virtual interrupt delivery.
-                */
-               if (vcpu->arch.apicv_active)
-                       kvm_x86_ops->hwapic_irr_update(vcpu,
-                               kvm_lapic_find_highest_irr(vcpu));
-       }
-
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
                ++vcpu->stat.req_event;
                kvm_apic_accept_events(vcpu);
@@ -6869,22 +6852,40 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       if (vcpu->fpu_active)
-               kvm_load_guest_fpu(vcpu);
+       kvm_load_guest_fpu(vcpu);
+
+       /*
+        * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
+        * IPIs are then delayed until after guest entry, which ensures that
+        * they result in virtual interrupt delivery.
+        */
+       local_irq_disable();
        vcpu->mode = IN_GUEST_MODE;
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        /*
-        * We should set ->mode before check ->requests,
-        * Please see the comment in kvm_make_all_cpus_request.
-        * This also orders the write to mode from any reads
-        * to the page tables done while the VCPU is running.
-        * Please see the comment in kvm_flush_remote_tlbs.
+        * 1) We should set ->mode before checking ->requests.  Please see
+        * the comment in kvm_make_all_cpus_request.
+        *
+        * 2) For APICv, we should set ->mode before checking PIR.ON.  This
+        * pairs with the memory barrier implicit in pi_test_and_set_on
+        * (see vmx_deliver_posted_interrupt).
+        *
+        * 3) This also orders the write to mode from any reads to the page
+        * tables done while the VCPU is running.  Please see the comment
+        * in kvm_flush_remote_tlbs.
         */
        smp_mb__after_srcu_read_unlock();
 
-       local_irq_disable();
+       /*
+        * This handles the case where a posted interrupt was
+        * notified with kvm_vcpu_kick.
+        */
+       if (kvm_lapic_enabled(vcpu)) {
+               if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+                       kvm_x86_ops->sync_pir_to_irr(vcpu);
+       }
 
        if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
            || need_resched() || signal_pending(current)) {
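
Point 2 of the new comment is the classic store-then-load handshake: the vCPU publishes IN_GUEST_MODE before looking for pending posted interrupts, while the sender publishes the pending bit before looking at the vCPU mode. A standalone C11 sketch of that handshake, using hypothetical names and plain atomics rather than the kernel primitives:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

    static atomic_int  vcpu_mode = OUTSIDE_GUEST_MODE;
    static atomic_bool pir_on;                  /* stands in for PIR.ON */

    static void vcpu_entry_side(void)           /* mirrors vcpu_enter_guest() */
    {
            atomic_store_explicit(&vcpu_mode, IN_GUEST_MODE, memory_order_relaxed);
            /* plays the role of smp_mb__after_srcu_read_unlock() */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load_explicit(&pir_on, memory_order_relaxed)) {
                    /* sync_pir_to_irr(): fold the posted interrupt into the IRR */
            }
    }

    static void sender_side(void)               /* mirrors posted-interrupt delivery */
    {
            atomic_store_explicit(&pir_on, true, memory_order_relaxed);
            /* plays the role of the barrier implied by pi_test_and_set_on() */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load_explicit(&vcpu_mode, memory_order_relaxed) == IN_GUEST_MODE) {
                    /* notify the running vCPU with the posted-interrupt IPI */
            } else {
                    /* kvm_vcpu_kick(): the vCPU will re-check before entry */
            }
    }

With a full fence on both sides, at least one of the two loads must observe the other side's store, so the interrupt is either synced into the IRR before entry or delivered by the notification IPI while the guest runs; it cannot be missed by both sides.
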
@@ -7023,6 +7024,9 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
+       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
+               kvm_x86_ops->check_nested_events(vcpu, false);
+
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted);
 }
@@ -7194,7 +7198,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        } else
                WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
-       r = vcpu_run(vcpu);
+       if (kvm_run->immediate_exit)
+               r = -EINTR;
+       else
+               r = vcpu_run(vcpu);
 
 out:
        post_kvm_run_save(vcpu);
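
Together with KVM_CAP_IMMEDIATE_EXIT advertised in the capability hunk above, this gives userspace a signal-free way to make KVM_RUN return before entering the guest: set run->immediate_exit and the ioctl fails with EINTR, matching the r = -EINTR branch added here. A minimal userspace sketch, assuming vcpu_fd is an open vCPU descriptor and run points at its mmap()ed struct kvm_run:

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: ask KVM_RUN to bounce back immediately.  Assumes
     * the caller has already confirmed KVM_CAP_IMMEDIATE_EXIT with
     * KVM_CHECK_EXTENSION on the /dev/kvm (or VM) file descriptor.
     */
    static int kvm_run_immediate_exit(int vcpu_fd, struct kvm_run *run)
    {
            int ret;

            run->immediate_exit = 1;
            ret = ioctl(vcpu_fd, KVM_RUN, 0);
            run->immediate_exit = 0;

            if (ret < 0 && errno == EINTR)      /* returned without running the guest */
                    return 0;
            return ret;
    }
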
@@ -8389,9 +8396,6 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-       if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
-               kvm_x86_ops->check_nested_events(vcpu, false);
-
        return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
@@ -8528,9 +8532,8 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
-
-       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
-                                     sizeof(val));
+       return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
+                                          sizeof(val));
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,