x86/mm: Fix missed global TLB flush stat
author Dave Hansen <dave.hansen@linux.intel.com>
Thu, 31 Jul 2014 15:40:56 +0000 (08:40 -0700)
committer H. Peter Anvin <hpa@linux.intel.com>
Thu, 31 Jul 2014 15:48:50 +0000 (08:48 -0700)
If we take the

	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
		local_flush_tlb();
		goto out;
	}

path out of flush_tlb_mm_range(), we will have flushed the TLB
but not incremented NR_TLB_LOCAL_FLUSH_ALL.  This patch unifies
the exit paths of the function so that a full TLB flush always
takes a single path and the stat is always counted.
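
For illustration, here is a minimal userspace sketch of the decision
logic after this patch.  flush_decision(), the stub counters, and the
simplified constants below are hypothetical stand-ins for the kernel
code (the real flush operations and count_vm_tlb_event() are replaced
with printf and plain counters); they are not part of the patch itself:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define TLB_FLUSH_ALL	-1UL
	#define VM_HUGETLB	0x00400000UL

	static unsigned long tlb_single_page_flush_ceiling = 1;
	static unsigned long nr_tlb_local_flush_all, nr_tlb_local_flush_one;

	static void flush_decision(unsigned long start, unsigned long end,
				   unsigned long vmflag)
	{
		unsigned long addr;
		/* do a global flush by default */
		unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

		if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
			base_pages_to_flush = (end - start) >> PAGE_SHIFT;

		if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
			base_pages_to_flush = TLB_FLUSH_ALL;
			/* the stat is now bumped on every full local flush */
			nr_tlb_local_flush_all++;
			printf("local_flush_tlb()\n");
		} else {
			/* flush the range one page at a time with 'invlpg' */
			for (addr = start; addr < end; addr += PAGE_SIZE) {
				nr_tlb_local_flush_one++;
				printf("invlpg %#lx\n", addr);
			}
		}

		/* single exit: a full local flush also widens the remote range */
		if (base_pages_to_flush == TLB_FLUSH_ALL) {
			start = 0UL;
			end = TLB_FLUSH_ALL;
		}
		printf("remote flush range: %#lx..%#lx\n\n", start, end);
	}

	int main(void)
	{
		/* one-page range: takes the invlpg path */
		flush_decision(0x1000, 0x2000, 0);
		/* full flush: previously missed the stat, now counted */
		flush_decision(0, TLB_FLUSH_ALL, 0);
		printf("flush_all=%lu flush_one=%lu\n",
		       nr_tlb_local_flush_all, nr_tlb_local_flush_one);
		return 0;
	}

Reusing base_pages_to_flush == TLB_FLUSH_ALL at the exit both replaces
need_flush_others_all and keeps the stat, the local flush, and the
remote-range widening on the same condition, so they cannot drift apart.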

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154056.FF763B76@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/mm/tlb.c

index dff6ddebc45f4da67f76516123dca3679695c563..ae584d09e8b03608346b5b29a288dd96d166fee6 100644
@@ -164,8 +164,9 @@ unsigned long tlb_single_page_flush_ceiling = 1;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag)
 {
-       int need_flush_others_all = 1;
        unsigned long addr;
+       /* do a global flush by default */
+       unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
        preempt_disable();
        if (current->active_mm != mm)
@@ -176,16 +177,14 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                goto out;
        }
 
-       if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-               local_flush_tlb();
-               goto out;
-       }
+       if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+               base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-       if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+       if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+               base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
-               need_flush_others_all = 0;
                /* flush range by one by one 'invlpg' */
                for (addr = start; addr < end;  addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -193,7 +192,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                }
        }
 out:
-       if (need_flush_others_all) {
+       if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }