#define LCTL_OPCODE "lctlg"
#endif
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm)
{
pgd_t *pgd = mm->pgd;
set_fs(current->thread.mm_segment);
}
+static inline void clear_user_asce(struct mm_struct *mm)
+{
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
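+ /* Reload CR1 (primary ASCE) and CR7 (secondary ASCE) with the kernel ASCE. */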
+ asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+ asm volatile(LCTL_OPCODE" 7,7,%0\n" : : "m" (S390_lowcore.user_asce));
+}
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
if (prev == next)
return;
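/* The upper 16 bits of attach_count are non-zero while a TLB flush for this mm is in progress. */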
if (atomic_inc_return(&next->context.attach_count) >> 16) {
- /* Delay update_mm until all TLB flushes are done. */
+ /* Delay update_user_asce until all TLB flushes are done. */
set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+ /* Clear old ASCE by loading the kernel ASCE. */
+ clear_user_asce(next);
} else {
cpumask_set_cpu(cpu, mm_cpumask(next));
- update_mm(next, tsk);
+ update_user_asce(next);
if (next->context.flush_mm)
/* Flush pending TLBs */
__tlb_flush_mm(next);
cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- update_mm(mm, tsk);
+ update_user_asce(mm);
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
tlb->end = end;
tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
- if (tlb->fullmm)
- __tlb_flush_mm(mm);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long address)
{
- if (!tlb->fullmm)
- return page_table_free_rcu(tlb, (unsigned long *) pte);
- page_table_free(tlb->mm, (unsigned long *) pte);
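+ /* Always free through the RCU-batched path; even on fullmm teardown other CPUs may still create TLB entries from this table. */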
+ page_table_free_rcu(tlb, (unsigned long *) pte);
}
/*
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
- if (!tlb->fullmm)
- return tlb_remove_table(tlb, pmd);
- crst_table_free(tlb->mm, (unsigned long *) pmd);
+ tlb_remove_table(tlb, pmd);
#endif
}
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
- if (!tlb->fullmm)
- return tlb_remove_table(tlb, pud);
- crst_table_free(tlb->mm, (unsigned long *) pud);
+ tlb_remove_table(tlb, pud);
#endif
}
struct mm_struct *mm = arg;
if (current->active_mm == mm)
- update_mm(mm, current);
+ update_user_asce(mm);
__tlb_flush_local();
}
{
pgd_t *pgd;
- if (current->active_mm == mm)
+ if (current->active_mm == mm) {
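+ /* Stop translating through the user ASCE before the top-level table is shrunk and freed below. */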
+ clear_user_asce(mm);
__tlb_flush_mm(mm);
+ }
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
crst_table_free(mm, (unsigned long *) pgd);
}
if (current->active_mm == mm)
- update_mm(mm, current);
+ update_user_asce(mm);
}
#endif