int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc)
+               struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc)
+               struct kvm_memory_slot old)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc)
+               struct kvm_userspace_memory_region *mem)
{
        unsigned long i;
        unsigned long pfn;

void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc)
+               struct kvm_memory_slot old)
{
        return;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
-               struct kvm_memory_slot *memslot,
-               struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc)
+               struct kvm_memory_slot *memslot,
+               struct kvm_memory_slot old,
+               struct kvm_userspace_memory_region *mem)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
-               struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc)
+               struct kvm_userspace_memory_region *mem,
+               struct kvm_memory_slot old)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc)
+               struct kvm_userspace_memory_region *mem)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
        if (mem->memory_size & 0xffffful)
                return -EINVAL;

-       if (!user_alloc)
-               return -EINVAL;
-
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc)
+               struct kvm_memory_slot old)
{
        int rc;

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc)
+               struct kvm_userspace_memory_region *mem)
{
        int npages = memslot->npages;

void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc)
+               struct kvm_memory_slot old)
{
        int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
-               struct kvm_userspace_memory_region *mem,
-               bool user_alloc);
+               struct kvm_userspace_memory_region *mem);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
-               struct kvm_memory_slot old,
-               bool user_alloc);
+               struct kvm_memory_slot old);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);

                /* flush all memory translations */
                slots = old_memslots;
        }

-       r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+       r = kvm_arch_prepare_memory_region(kvm, &new, old, mem);
        if (r)
                goto out_slots;

        old_memslots = install_new_memslots(kvm, slots, &new);

-       kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
+       kvm_arch_commit_memory_region(kvm, mem, old);

        kvm_free_physmem_slot(&old, &new);
        kfree(old_memslots);