From: Linus Torvalds
Date: Mon, 8 May 2017 19:37:56 +0000 (-0700)
Subject: Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=2d3e4866dea96b0506395b47bfefb234f2088dac;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "ARM:
   - HYP mode stub supports kexec/kdump on 32-bit
   - improved PMU support
   - virtual interrupt controller performance improvements
   - support for userspace virtual interrupt controller (slower, but
     necessary for KVM on the weird Broadcom SoCs used by the Raspberry
     Pi 3)

  MIPS:
   - basic support for hardware virtualization (ImgTec P5600/P6600/I6400
     and Cavium Octeon III)

  PPC:
   - in-kernel acceleration for VFIO

  s390:
   - support for guests without storage keys
   - adapter interruption suppression

  x86:
   - usual range of nVMX improvements, notably nested EPT support for
     accessed and dirty bits
   - emulation of CPL3 CPUID faulting

  generic:
   - first part of VCPU thread request API
   - kvm_stat improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (227 commits)
  kvm: nVMX: Don't validate disabled secondary controls
  KVM: put back #ifndef CONFIG_S390 around kvm_vcpu_kick
  Revert "KVM: Support vCPU-based gfn->hva cache"
  tools/kvm: fix top level makefile
  KVM: x86: don't hold kvm->lock in KVM_SET_GSI_ROUTING
  KVM: Documentation: remove VM mmap documentation
  kvm: nVMX: Remove superfluous VMX instruction fault checks
  KVM: x86: fix emulation of RSM and IRET instructions
  KVM: mark requests that need synchronization
  KVM: return if kvm_vcpu_wake_up() did wake up the VCPU
  KVM: add explicit barrier to kvm_vcpu_kick
  KVM: perform a wake_up in kvm_make_all_cpus_request
  KVM: mark requests that do not need a wakeup
  KVM: remove #ifndef CONFIG_S390 around kvm_vcpu_wake_up
  KVM: x86: always use kvm_make_request instead of set_bit
  KVM: add kvm_{test,clear}_request to replace {test,clear}_bit
  s390: kvm: Cpu model support for msa6, msa7 and msa8
  KVM: x86: remove irq disablement around KVM_SET_CLOCK/KVM_GET_CLOCK
  kvm: better MWAIT emulation for guests
  KVM: x86: virtualize cpuid faulting
  ...
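The "VCPU thread request API" entries in the shortlog (kvm_make_request, the new kvm_{test,clear}_request, the kick/wakeup barriers) all revolve around one small pattern. The following is a minimal sketch of those helpers' semantics, loosely modeled on the 4.12-era include/linux/kvm_host.h; the bodies are simplified, and KVM_REQ_EXAMPLE is a made-up request bit used only for illustration:

/*
 * Sketch of the vCPU request helpers; simplified, not the verbatim
 * kernel implementation. Assumes kernel context (<linux/kvm_host.h>).
 */
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Publish any data the request handler will read before the
         * bit becomes visible; the consumer side pairs this with a
         * barrier after clearing the bit.
         */
        smp_wmb();
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req, &vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        /* Unlike kvm_check_request(), no memory ordering is implied. */
        clear_bit(req, &vcpu->requests);
}

A requester typically pairs kvm_make_request() with kvm_vcpu_kick() so that a running vCPU exits guest mode and notices the bit; the vCPU loop then consumes it with kvm_check_request(), which clears the bit and orders subsequent reads after the test.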
---

diff --cc arch/arm/kvm/arm.c
index 314eb6abe1ff,8966e59806aa..8a31906bdc9b
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@@ -1121,21 -1138,10 +1138,13 @@@ static void cpu_hyp_reinit(void
  		 */
  		__cpu_init_stage2();
  	} else {
- 		if (__hyp_get_vectors() == hyp_default_vectors)
- 			cpu_init_hyp_mode(NULL);
+ 		cpu_init_hyp_mode(NULL);
  	}
 +
 +	if (vgic_present)
 +		kvm_vgic_init_cpu_hardware();
  }
  
- static void cpu_hyp_reset(void)
- {
- 	if (!is_kernel_in_hyp_mode())
- 		__cpu_reset_hyp_mode(hyp_default_vectors,
- 				     kvm_get_idmap_start());
- }
- 
  static void _kvm_arch_hardware_enable(void *discard)
  {
  	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
diff --cc arch/arm/mm/mmu.c
index 347cca965783,e98a2b5c4e85..31af3cb59a60
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@@ -1639,10 -1637,7 +1641,13 @@@ void __init paging_init(const struct ma
  
  	empty_zero_page = virt_to_page(zero_page);
  	__flush_dcache_page(NULL, empty_zero_page);
+ 
+ 	/* Compute the virt/idmap offset, mostly for the sake of KVM */
+ 	kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
  }
 +
 +void __init early_mm_init(const struct machine_desc *mdesc)
 +{
 +	build_mem_type_table();
 +	early_paging_init(mdesc);
 +}
diff --cc arch/s390/kvm/kvm-s390.c
index d5c5c911821a,4bafb0a0c8b5..aeb3feb9de53
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@@ -273,9 -273,13 +273,13 @@@ static void kvm_s390_cpu_feat_init(void
  			      kvm_s390_available_subfunc.pcc);
  	}
  	if (test_facility(57)) /* MSA5 */
 -		__cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
 +		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
  			      kvm_s390_available_subfunc.ppno);
  
+ 	if (test_facility(146)) /* MSA8 */
+ 		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
+ 			      kvm_s390_available_subfunc.kma);
+ 
  	if (MACHINE_HAS_ESOP)
  		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
  	/*
diff --cc arch/x86/kvm/vmx.c
index 1a471e5f963f,e7d929103f4a..c5fd459c4043
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@@ -3462,13 -3444,11 +3442,9 @@@ static int hardware_enable(void
  		/* enable and lock */
  		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
  	}
- 	cr4_set_bits(X86_CR4_VMXE);
- 
- 	if (vmm_exclusive) {
- 		kvm_cpu_vmxon(phys_addr);
- 		ept_sync_global();
- 	}
+ 	kvm_cpu_vmxon(phys_addr);
+ 	ept_sync_global();
  
 -	native_store_gdt(this_cpu_ptr(&host_gdt));
 -
  	return 0;
  }
@@@ -10266,21 -10163,11 +10162,23 @@@ static int prepare_vmcs02(struct kvm_vc
  
  	}
  
 +	if (enable_pml) {
 +		/*
 +		 * Conceptually we want to copy the PML address and index from
 +		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
 +		 * since we always flush the log on each vmexit, this happens
 +		 * to be equivalent to simply resetting the fields in vmcs02.
 +		 */
 +		ASSERT(vmx->pml_pg);
 +		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 +		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 +	}
 +
  	if (nested_cpu_has_ept(vmcs12)) {
- 		kvm_mmu_unload(vcpu);
- 		nested_ept_init_mmu_context(vcpu);
+ 		if (nested_ept_init_mmu_context(vcpu)) {
+ 			*entry_failure_code = ENTRY_FAIL_DEFAULT;
+ 			return 1;
+ 		}
  	} else if (nested_cpu_has2(vmcs12,
  				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
  		vmx_flush_tlb_ept_only(vcpu);
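In the hardware_enable() hunk above, the open-coded cr4_set_bits(X86_CR4_VMXE) and the vmm_exclusive conditional disappear: with exclusive mode gone, kvm_cpu_vmxon() is called unconditionally, and setting CR4.VMXE appears to move into kvm_cpu_vmxon() itself. A rough sketch of that shape, illustrative rather than the verbatim 4.12 code:

/*
 * Sketch: enabling VMX operation on one CPU. Assumes CR4.VMXE is now
 * managed inside kvm_cpu_vmxon(), as the removed lines above suggest.
 */
static void kvm_cpu_vmxon(u64 vmxon_pointer)
{
        /* VMXON raises #UD unless CR4.VMXE is already set. */
        cr4_set_bits(X86_CR4_VMXE);

        /* VMXON takes the physical address of the VMXON region via memory. */
        asm volatile ("vmxon %0" : : "m" (vmxon_pointer) : "memory", "cc");
}

Folding the CR4 write into the vmxon helper keeps enable and disable symmetric: the teardown path can clear CR4.VMXE right after VMXOFF without callers having to remember either bit flip.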
diff --cc virt/kvm/arm/vgic/vgic-v2.c
index b637d9c7afe3,025b57d5787e..a65757aab6d3
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@@ -22,36 -22,7 +22,22 @@@
  
  #include "vgic.h"
  
- /*
-  * Call this function to convert a u64 value to an unsigned long * bitmask
-  * in a way that works on both 32-bit and 64-bit LE and BE platforms.
-  *
-  * Warning: Calling this function may modify *val.
-  */
- static unsigned long *u64_to_bitmask(u64 *val)
- {
- #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
- 	*val = (*val >> 32) | (*val << 32);
- #endif
- 	return (unsigned long *)val;
- }
- 
 +static inline void vgic_v2_write_lr(int lr, u32 val)
 +{
 +	void __iomem *base = kvm_vgic_global_state.vctrl_base;
 +
 +	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
 +}
 +
 +void vgic_v2_init_lrs(void)
 +{
 +	int i;
 +
 +	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
 +		vgic_v2_write_lr(i, 0);
 +}
 +
- void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
  {
  	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
  
@@@ -206,10 -160,10 +175,10 @@@ void vgic_v2_set_vmcr(struct kvm_vcpu *
  		GICH_VMCR_ALIAS_BINPOINT_MASK;
  	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
  		GICH_VMCR_BINPOINT_MASK;
 -	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
 -		GICH_VMCR_PRIMASK_MASK;
 +	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
 +		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
  
- 	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+ 	cpu_if->vgic_vmcr = vmcr;
  }
  
  void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
diff --cc virt/kvm/arm/vgic/vgic.h
index 6cf557e9f718,44445dac0835..799fd651b260
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@@ -137,7 -129,8 +136,9 @@@ int vgic_v2_map_resources(struct kvm *k
  int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
  			     enum vgic_type);
  
 +void vgic_v2_init_lrs(void);
 +
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
  
  static inline void vgic_get_irq_kref(struct vgic_irq *irq)
  {
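In the vgic_v2_set_vmcr() hunk above, the GICH_VMCR priority-mask field is only five bits wide, while the software-visible PMR carries an eight-bit priority, so the value has to be shifted down by GICV_PMR_PRIORITY_SHIFT before being packed (and shifted back up when read out). A standalone arithmetic check of that packing, assuming the shift and mask values mirror include/linux/irqchip/arm-gic.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed constants, mirroring include/linux/irqchip/arm-gic.h. */
#define GICV_PMR_PRIORITY_SHIFT 3
#define GICH_VMCR_PRIMASK_SHIFT 27
#define GICH_VMCR_PRIMASK_MASK  (0x1fU << GICH_VMCR_PRIMASK_SHIFT)

int main(void)
{
        uint32_t pmr = 0xf8;    /* 8-bit priority mask; low 3 bits unimplemented */
        uint32_t vmcr = 0;

        /* Drop the unimplemented low bits, then place the 5-bit value. */
        vmcr |= ((pmr >> GICV_PMR_PRIORITY_SHIFT) << GICH_VMCR_PRIMASK_SHIFT) &
                GICH_VMCR_PRIMASK_MASK;

        printf("vmcr = 0x%08x\n", (unsigned)vmcr);      /* prints 0xf8000000 */
        return 0;
}

Reading the field back reverses the two shifts, which is what keeps userspace save/restore of the priority mask symmetric.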