KVM: Remove unnecessary divide operations
author Joerg Roedel <joerg.roedel@amd.com>
Thu, 1 Jul 2010 14:00:11 +0000 (16:00 +0200)
committer Avi Kivity <avi@redhat.com>
Sun, 1 Aug 2010 07:47:30 +0000 (10:47 +0300)
This patch converts unnecessary divide and modulo operations
in the KVM large page code into shift and mask operations.
This allows gfn_t to be converted to u64 without breaking
32-bit builds.
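
As an illustration (not part of the patch): the rewrite relies on
KVM_PAGES_PER_HPAGE(level) always being a power of two, so a divide
becomes a right shift by KVM_HPAGE_GFN_SHIFT(level) and a modulo
becomes a mask with (KVM_PAGES_PER_HPAGE(level) - 1). The standalone
sketch below uses hypothetical macro names modelled on the x86
definitions (9 extra bits per level); the shift/mask forms are what
let a 64-bit gfn_t compile on 32-bit hosts, which have no native
64-bit divide instruction.

    /* Standalone sketch of the divide/modulo -> shift/mask equivalence.
     * HPAGE_GFN_SHIFT/PAGES_PER_HPAGE are illustrative stand-ins for
     * the x86 KVM_HPAGE_GFN_SHIFT/KVM_PAGES_PER_HPAGE macros.
     */
    #include <assert.h>
    #include <stdint.h>

    #define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)
    #define PAGES_PER_HPAGE(level)  (1ULL << HPAGE_GFN_SHIFT(level))

    int main(void)
    {
            uint64_t gfn = 0x123456789ULL;  /* a gfn that no longer fits in 32 bits */
            int level = 2;                  /* 2MB pages on x86 */

            /* divide by a power of two == shift right by its log2 */
            assert(gfn / PAGES_PER_HPAGE(level) ==
                   gfn >> HPAGE_GFN_SHIFT(level));

            /* modulo a power of two == mask with (size - 1) */
            assert(gfn % PAGES_PER_HPAGE(level) ==
                   (gfn & (PAGES_PER_HPAGE(level) - 1)));

            return 0;
    }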

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/ia64/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_host.h
arch/s390/include/asm/kvm_host.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
virt/kvm/kvm_main.c

diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index a362e67e0ca6edec83923e42a655d61d639001c8..2f229e5de4980885bbdab5dcd614495bf8e9abd3 100644
@@ -235,6 +235,7 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G          32
 #define KVM_REQ_RESUME         33
 
+#define KVM_HPAGE_GFN_SHIFT(x) 0
 #define KVM_NR_PAGE_SIZES      1
 #define KVM_PAGES_PER_HPAGE(x) 1
 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e004eafcd3f06f53b19834bea122b030af51ec9e..b0b23c007d6e7ecba2501b7eb8d35a5ccb7b1e70 100644
@@ -35,6 +35,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
 #define KVM_NR_PAGE_SIZES      1
 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
 
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b95710a1f5dd479044dda36e1aedf4070104728e..cef7dbf69dfcea3dd28f70485cb2aa7d65bc5ce2 100644
@@ -41,7 +41,8 @@ struct sca_block {
 } __attribute__((packed));
 
 #define KVM_NR_PAGE_SIZES 2
-#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x)      (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2bda62485c4cbfcd4bf872697a9f15ebb599b3c7..50c79b9f5c3889039dee07576615bcf5b16e78a4 100644
@@ -44,7 +44,8 @@
 
 /* KVM Hugepage definitions for x86 */
 #define KVM_NR_PAGE_SIZES      3
-#define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
+#define KVM_HPAGE_SHIFT(x)     (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x)      (1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x)      (~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca07ed083b59a4d1cc8d436215528dbcf9b3a384..a20fd613acfe5afd1a65cf14d11c30c25be9b324 100644
@@ -423,8 +423,8 @@ static int *slot_largepage_idx(gfn_t gfn,
 {
        unsigned long idx;
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-             (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+             (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].write_count;
 }
 
@@ -528,8 +528,8 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-               (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 
        return &slot->lpage_info[level - 2][idx].rmap_pde;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ec2e3c6ac7ed9985cc7505a502c6c1c357d3fc33..a60b6b053b6fca12cb33056752d69e5fee2805fa 100644
@@ -626,9 +626,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                if (new.lpage_info[i])
                        continue;
 
-               lpages = 1 + (base_gfn + npages - 1) /
-                            KVM_PAGES_PER_HPAGE(level);
-               lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+               lpages = 1 + ((base_gfn + npages - 1)
+                            >> KVM_HPAGE_GFN_SHIFT(level));
+               lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
                new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
 
@@ -638,9 +638,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                memset(new.lpage_info[i], 0,
                       lpages * sizeof(*new.lpage_info[i]));
 
-               if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+               if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][0].write_count = 1;
-               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+               if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][lpages - 1].write_count = 1;
                ugfn = new.userspace_addr >> PAGE_SHIFT;
                /*