import PULS_20160108
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / mm / context.c
index 2ac37372ef52f4ba4db642d39bef349798f1785a..b483f32b1746818866b146de5fc75e9412a5ca53 100644 (file)
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION     (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS         (ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)      ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)       ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS         ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+/*
+ * a15_erratum_get_cpumask - collect CPUs needing a TLB-maintenance IPI
+ * @this_cpu:	the calling CPU, excluded from the result
+ * @mm:		address space whose ASID is being invalidated
+ * @mask:	output cpumask; matching CPUs are set in it
+ *
+ * Workaround helper for Cortex-A15 erratum 798181. A remote CPU is added
+ * to @mask only when the ASID it is currently running matches @mm's
+ * context ID. If a CPU's active ASID reads as 0 (it has been through an
+ * ASID rollover but not yet switched to a new task), its reserved ASID
+ * is used for the comparison instead.
+ *
+ * Takes cpu_asid_lock so the active/reserved ASID snapshot is consistent
+ * with respect to a concurrent rollover.
+ */
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+                            cpumask_t *mask)
+{
+       int cpu;
+       unsigned long flags;
+       u64 context_id, asid;
+
+       raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+       context_id = mm->context.id.counter;
+       for_each_online_cpu(cpu) {
+               if (cpu == this_cpu)
+                       continue;
+               /*
+                * We only need to send an IPI if the other CPUs are
+                * running the same ASID as the one being invalidated.
+                */
+               asid = per_cpu(active_asids, cpu).counter;
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, cpu);
+               if (context_id == asid)
+                       cpumask_set_cpu(cpu, mask);
+       }
+       raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
@@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu)
                        asid = 0;
                } else {
                        asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-                       __set_bit(ASID_TO_IDX(asid), asid_map);
+                       /*
+                        * If this CPU has already been through a
+                        * rollover, but hasn't run another task in
+                        * the meantime, we must preserve its reserved
+                        * ASID, as this is the only trace we have of
+                        * the process it is still running.
+                        */
+                       if (asid == 0)
+                               asid = per_cpu(reserved_asids, i);
+                       __set_bit(asid & ~ASID_MASK, asid_map);
                }
                per_cpu(reserved_asids, i) = asid;
        }
@@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                /*
                 * Allocate a free ASID. If we can't find one, take a
                 * note of the currently active ASIDs and mark the TLBs
-                * as requiring flushes.
+                * as requiring flushes. We always count from ASID #1,
+                * as we reserve ASID #0 to switch via TTBR0 and indicate
+                * rollover events.
                 */
-               asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+               asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                if (asid == NUM_USER_ASIDS) {
                        generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                         &asid_generation);
                        flush_context(cpu);
-                       asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+                       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
                }
                __set_bit(asid, asid_map);
-               asid = generation | IDX_TO_ASID(asid);
+               asid |= generation;
                cpumask_clear(mm_cpumask(mm));
        }
 
@@ -215,7 +250,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
-               dummy_flush_tlb_a15_erratum();
+               erratum_a15_798181();
        }
 
        atomic64_set(&per_cpu(active_asids, cpu), asid);