cpumask: use mm_cpumask() wrapper: x86
author Rusty Russell <rusty@rustcorp.com.au>
Thu, 24 Sep 2009 15:34:51 +0000 (09:34 -0600)
committer Rusty Russell <rusty@rustcorp.com.au>
Thu, 24 Sep 2009 00:04:52 +0000 (09:34 +0930)
Makes the code future-proof against the impending change to mm->cpu_vm_mask (it is about to become a pointer).

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/ldt.c
arch/x86/mm/tlb.c
arch/x86/xen/mmu.c
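For reference, mm_cpumask() is just a thin accessor over the mm's CPU mask. A minimal sketch of the wrapper this patch converts to, assuming the definition of the era (the real one lives in the core headers, not in this diff):

/*
 * Illustrative sketch, not part of this patch: today cpu_vm_mask is
 * embedded in struct mm_struct, so the accessor simply takes its address.
 * When cpu_vm_mask later becomes a pointer, only this wrapper changes and
 * the arch code below keeps compiling unmodified.
 */
#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)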

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index f923203dc39a68f04e6c0ea05fe7915c25b0b969..4a2d4e0c18d99cf635b02820070330f82dccbe63 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
                percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-               cpu_set(cpu, next->cpu_vm_mask);
+               cpumask_set_cpu(cpu, mm_cpumask(next));
 
                /* Re-load page tables */
                load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+               if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
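The hunk above also shows the API change the commit message mentions: the old cpu_set()/cpu_clear() macros want the cpumask_t itself, while cpumask_set_cpu()/cpumask_clear_cpu() take a struct cpumask pointer. A rough side-by-side sketch (mark_mm_active() is a made-up helper, for illustration only):

/* Illustration only: marking this CPU active in an mm, old vs. new style. */
static inline void mark_mm_active(int cpu, struct mm_struct *mm)
{
	/* old, deprecated: operates on the cpumask_t lvalue directly */
	/* cpu_set(cpu, mm->cpu_vm_mask); */

	/* new: takes a pointer, so the mask can later live out of line */
	cpumask_set_cpu(cpu, mm_cpumask(mm));
}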
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 71f1d99a635d75dd7f30ee1f1876b91112d2a82c..ec6ef60cbd170b0809d20287a60c226fb8fc25ed 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
                preempt_disable();
                load_LDT(pc);
-               if (!cpus_equal(current->mm->cpu_vm_mask,
-                               cpumask_of_cpu(smp_processor_id())))
+               if (!cpumask_equal(mm_cpumask(current->mm),
+                                  cpumask_of(smp_processor_id())))
                        smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
 #else
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f0724b1d4ce56a74ca03b70cc82f25..36fe08eeb5c3e0de7c51094c111a58230351f1d1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
        if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
-       cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+       cpumask_clear_cpu(cpu,
+                         mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
        load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
        preempt_disable();
 
        local_flush_tlb();
-       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
        preempt_enable();
 }
 
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
                else
                        leave_mm(smp_processor_id());
        }
-       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
        preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
                        leave_mm(smp_processor_id());
        }
 
-       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-               flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+       if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(mm_cpumask(mm), mm, va);
 
        preempt_enable();
 }
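A note on the cpumask_any_but() tests kept above: cpumask_any_but(mask, cpu) returns some CPU set in mask other than cpu, or a value >= nr_cpu_ids when no such CPU exists, so comparing against nr_cpu_ids asks "does any other CPU have this mm active?" before bothering with flush IPIs. In sketch form (illustrative, mirroring the lines above):

	/* Only send cross-CPU flushes if some other CPU is using this mm. */
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);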
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 093dd59b53856c8629cbd48ac7d9f031162d060e..3bf7b1d250ce986d02bc44de0bd2086f6e37b824 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
        /* Get the "official" set of cpus referring to our pagetable. */
        if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
                for_each_online_cpu(cpu) {
-                       if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+                       if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
                            && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
                                continue;
                        smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
                }
                return;
        }
-       cpumask_copy(mask, &mm->cpu_vm_mask);
+       cpumask_copy(mask, mm_cpumask(mm));
 
        /* It's possible that a vcpu may have a stale reference to our
           cr3, because its in lazy mode, and it hasn't yet flushed
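The xen_drop_mm_ref() hunk copies the mm's mask into a separately allocated cpumask_var_t before working on it. A minimal sketch of that allocate/copy/free pattern, illustrative only and not part of the patch:

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return;				/* caller must fall back, as the hunk above does */

	cpumask_copy(mask, mm_cpumask(mm));	/* private snapshot of the mm's CPUs */
	/* ... prod the CPUs in the snapshot ... */
	free_cpumask_var(mask);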