x86/asm: Tidy up TSS limit code
author     Andy Lutomirski <luto@kernel.org>
           Wed, 22 Feb 2017 15:36:16 +0000 (07:36 -0800)
committer  Radim Krčmář <rkrcmar@redhat.com>
           Wed, 1 Mar 2017 16:03:22 +0000 (17:03 +0100)
In an earlier version of the patch ("x86/kvm/vmx: Defer TR reload
after VM exit") that introduced TSS limit validity tracking, I
confused which helper was which.  On reflection, the names I chose
sucked.  Rename the helpers to make it more obvious what's going on
and add some comments.

While I'm at it, clear __tss_limit_invalid when force-reloading as
well as when conditionally reloading, since any TR reload fixes the
limit.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
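
The tracking scheme behind these helpers reduces to one per-CPU flag and
three operations.  A minimal, compilable sketch of the same pattern is
below; the names mirror the kernel helpers, but the flag and the
TIF_IO_BITMAP test are plain booleans here, not the real per-CPU and
thread-flag machinery:

    /*
     * Sketch of the lazy TSS-limit invalidation pattern, reduced to
     * plain C.  Stand-ins: tss_limit_invalid for the per-CPU flag,
     * task_has_io_bitmap for TIF_IO_BITMAP.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool tss_limit_invalid;
    static bool task_has_io_bitmap;

    static void force_reload_tr(void)
    {
            /* The real helper rewrites the GDT entry and runs LTR. */
            printf("LTR executed, TSS limit restored\n");
            tss_limit_invalid = false;  /* any TR reload fixes the limit */
    }

    static void refresh_tss_limit(void)
    {
            /* Reload TR only if someone marked the limit stale. */
            if (tss_limit_invalid)
                    force_reload_tr();
    }

    static void invalidate_tss_limit(void)
    {
            /*
             * A task that uses an I/O bitmap needs the limit to be
             * correct at all times, so reload eagerly; everyone else
             * gets the cheap lazy path.
             */
            if (task_has_io_bitmap)
                    force_reload_tr();
            else
                    tss_limit_invalid = true;
    }

    int main(void)
    {
            invalidate_tss_limit();     /* e.g. after a VM exit */
            refresh_tss_limit();        /* on switch to an ioperm() user */
            return 0;
    }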
arch/x86/include/asm/desc.h
arch/x86/kernel/ioport.c
arch/x86/kernel/process.c

diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index cb8f9149f6c852377b34bae742bbd54a3de53cf8..1548ca92ad3f620d48bce51537d24e0701212a42 100644
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
        asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
 }
 
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
 static inline void force_reload_TR(void)
 {
        struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
        write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
        load_TR_desc();
+       this_cpu_write(__tss_limit_invalid, false);
 }
 
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
 {
        DEBUG_LOCKS_WARN_ON(preemptible());
 
-       if (unlikely(this_cpu_read(need_tr_refresh))) {
+       if (unlikely(this_cpu_read(__tss_limit_invalid)))
                force_reload_TR();
-               this_cpu_write(need_tr_refresh, false);
-       }
 }
 
 /*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
        if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
                force_reload_TR();
        else
-               this_cpu_write(need_tr_refresh, true);
+               this_cpu_write(__tss_limit_invalid, true);
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
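
For context on why a stale limit matters at all: the CPU honors a
user-mode IN/OUT only if the relevant I/O-bitmap byte lies inside the
TSS limit, and a VMX VM exit reloads host TR with a limit of 0x67,
which covers only the hardware TSS and hides the bitmap entirely
(hence the deferred-reload patch referenced above).  A rough sketch of
that permission check, assuming the SDM's TSS layout with the 16-bit
I/O map base at offset 0x66, and glossing over the fact that the CPU
actually reads two bitmap bytes per access:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Approximation of the CPU's I/O-permission check; a sketch for
     * illustration, not a hardware model.
     */
    static bool io_permitted(const uint8_t *tss, uint32_t tss_limit,
                             uint16_t port)
    {
            uint16_t base = tss[0x66] | (tss[0x67] << 8);
            uint32_t byte = base + port / 8;

            /*
             * A limit of 0x67 puts every bitmap byte out of bounds,
             * so all user-mode port accesses fault with #GP.
             */
            if (byte > tss_limit)
                    return false;

            /* A clear bit means the port is accessible. */
            return !(tss[byte] & (1u << (port % 8)));
    }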
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index b01bc851745048f7bdcbf8c9fb1b9fcc383b1b28..875d3d25dd6a68edfd019034ebd4b392150d18e6 100644
@@ -47,8 +47,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
                t->io_bitmap_ptr = bitmap;
                set_thread_flag(TIF_IO_BITMAP);
 
+               /*
+                * Now that we have an IO bitmap, we need our TSS limit to be
+                * correct.  It's fine if we are preempted after doing this:
+                * with TIF_IO_BITMAP set, context switches will keep our TSS
+                * limit correct.
+                */
                preempt_disable();
-               refresh_TR();
+               refresh_tss_limit();
                preempt_enable();
        }
 
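Being preempted right after the refresh is harmless precisely because
TIF_IO_BITMAP is already set: the next switch back into this task goes
through __switch_to_xtra(), which calls refresh_tss_limit() again.
From user space, this whole path can be exercised through the standard
ioperm(2) interface; a minimal example, assuming glibc's <sys/io.h> on
x86 and CAP_SYS_RAWIO:

    #include <stdio.h>
    #include <sys/io.h>

    int main(void)
    {
            /*
             * Enters sys_ioperm() above; on success the TSS limit must
             * cover the I/O bitmap, or the outb() below would #GP.
             */
            if (ioperm(0x80, 1, 1)) {
                    perror("ioperm");
                    return 1;
            }
            outb(0, 0x80);  /* port 0x80: the legacy POST/delay port */
            return 0;
    }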
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7780efa635b911cff705e0bd73abb6064a3e8f62..0b302591b51f2ec4f4e06d534720261f2adccb2c 100644
@@ -65,8 +65,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
 
 /*
  * this gets called so that we can store lazy state into memory and copy the
@@ -218,7 +218,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 * Make sure that the TSS limit is correct for the CPU
                 * to notice the IO bitmap.
                 */
-               refresh_TR();
+               refresh_tss_limit();
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
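
The last hunk patches only the call site; the surrounding logic in
__switch_to_xtra() installs the incoming task's bitmap and scrubs the
TSS copy when only the outgoing task was a bitmap user.  A condensed,
compilable sketch of that half of the function, with stub types
standing in for the real task_struct and tss_struct:

    #include <stdbool.h>
    #include <string.h>

    #define IO_BITMAP_BYTES (65536 / 8)

    struct thread_stub {
            bool io_bitmap_flag;        /* stands in for TIF_IO_BITMAP */
            unsigned char io_bitmap[IO_BITMAP_BYTES];
            unsigned int io_bitmap_max; /* bytes actually in use */
    };

    struct tss_stub {
            unsigned char io_bitmap[IO_BITMAP_BYTES];
    };

    static void refresh_tss_limit_stub(void)
    {
            /* Would reload TR if the per-CPU flag says it is stale. */
    }

    static void switch_io_bitmap(struct thread_stub *prev,
                                 struct thread_stub *next,
                                 struct tss_stub *tss)
    {
            if (next->io_bitmap_flag) {
                    /* Install the incoming task's bitmap ... */
                    memcpy(tss->io_bitmap, next->io_bitmap,
                           next->io_bitmap_max);
                    /* ... and make sure the TSS limit exposes it. */
                    refresh_tss_limit_stub();
            } else if (prev->io_bitmap_flag) {
                    /* Clear any possible leftover bits. */
                    memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
            }
    }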