From dcc17d1baef3721d1574e5b2f4f2d4607514bcff Mon Sep 17 00:00:00 2001
From: Peter Keilty
Date: Mon, 31 Oct 2005 16:44:47 -0500
Subject: [PATCH] [IA64] Use bitmaps for efficient context allocation/free

Corrects the very inefficient method of finding free context_ids in
get_mmu_context().  Instead of walking the task_list of all processes,
two bitmaps are used to efficiently store and look up state: in-use
and needs-flushing.  The entire RID address space is now used before
wrap_mmu_context() is called and a global TLB flush is performed.

Special thanks to Ken and Rohit for their review and modifications in
using a bit flushmap.

Signed-off-by: Peter Keilty
Signed-off-by: Tony Luck
---
 arch/ia64/kernel/setup.c       |  1 +
 arch/ia64/mm/tlb.c             | 56 ++++++++++++++++------------------
 include/asm-ia64/mmu_context.h | 19 +++++++++---
 include/asm-ia64/tlbflush.h    |  1 +
 4 files changed, 44 insertions(+), 33 deletions(-)

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index fc56ca2da358..c9388a92cf4a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -454,6 +454,7 @@ setup_arch (char **cmdline_p)
 #endif
 
 	cpu_init();	/* initialize the bootstrap CPU */
+	mmu_context_init();	/* initialize context_id bitmap */
 
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index c79a9b96d02b..39628fca274c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -8,6 +8,8 @@
  *		Modified RID allocation for SMP
  *	Goutham Rao
  *		IPI based ptc implementation and A-step IPI implementation.
+ * Rohit Seth
+ * Ken Chen
  */
 #include <linux/config.h>
 #include <linux/module.h>
@@ -16,12 +18,14 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/bootmem.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
+#include <asm/dma.h>
 
 static struct {
 	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -31,49 +35,43 @@ static struct {
 struct ia64_ctx ia64_ctx = {
 	.lock =		SPIN_LOCK_UNLOCKED,
 	.next =		1,
-	.limit =	(1 << 15) - 1,	/* start out with the safe (architected) limit */
 	.max_ctx =	~0U
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 
+/*
+ * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
+ * Called after cpu_init() has setup ia64_ctx.max_ctx based on
+ * maximum RID that is supported by boot CPU.
+ */
+void __init
+mmu_context_init (void)
+{
+	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+}
+
 /*
  * Acquire the ia64_ctx.lock before calling this function!
 */
 void
 wrap_mmu_context (struct mm_struct *mm)
 {
-	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
-	struct task_struct *tsk;
 	int i;
+	unsigned long flush_bit;
 
-	if (ia64_ctx.next > max_ctx)
-		ia64_ctx.next = 300;	/* skip daemons */
-	ia64_ctx.limit = max_ctx + 1;
-
-	/*
-	 * Scan all the task's mm->context and set proper safe range
-	 */
-
-	read_lock(&tasklist_lock);
-  repeat:
-	for_each_process(tsk) {
-		if (!tsk->mm)
-			continue;
-		tsk_context = tsk->mm->context;
-		if (tsk_context == ia64_ctx.next) {
-			if (++ia64_ctx.next >= ia64_ctx.limit) {
-				/* empty range: reset the range limit and start over */
-				if (ia64_ctx.next > max_ctx)
-					ia64_ctx.next = 300;
-				ia64_ctx.limit = max_ctx + 1;
-				goto repeat;
-			}
-		}
-		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
-			ia64_ctx.limit = tsk_context;
+	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
+		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
+		ia64_ctx.bitmap[i] ^= flush_bit;
 	}
-	read_unlock(&tasklist_lock);
+
+	/* use offset at 300 to skip daemons */
+	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, 300);
+	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+				ia64_ctx.max_ctx, ia64_ctx.next);
+
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu(); /* prevent preemption/migration */
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d6e72f7b08e..8d9b30b5f7d4 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -32,13 +32,17 @@
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
-	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
-	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+	unsigned int limit;	/* available free range */
+	unsigned int max_ctx;	/* max. context value supported by all CPUs */
+				/* call wrap_mmu_context when next >= max */
+	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
+	unsigned long *flushmap;/* pending rid to be flushed */
 };
 
 extern struct ia64_ctx ia64_ctx;
 DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 
+extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
 static inline void
@@ -83,9 +87,16 @@ get_mmu_context (struct mm_struct *mm)
 		context = mm->context;
 		if (context == 0) {
 			cpus_clear(mm->cpu_vm_mask);
-			if (ia64_ctx.next >= ia64_ctx.limit)
-				wrap_mmu_context(mm);
+			if (ia64_ctx.next >= ia64_ctx.limit) {
+				ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+						ia64_ctx.max_ctx, ia64_ctx.next);
+				ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+						ia64_ctx.max_ctx, ia64_ctx.next);
+				if (ia64_ctx.next >= ia64_ctx.max_ctx)
+					wrap_mmu_context(mm);
+			}
 			mm->context = context = ia64_ctx.next++;
+			__set_bit(context, ia64_ctx.bitmap);
 		}
 	}
 	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index b65c62702724..a35b323bae4c 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -51,6 +51,7 @@ flush_tlb_mm (struct mm_struct *mm)
 	if (!mm)
 		return;
 
+	set_bit(mm->context, ia64_ctx.flushmap);
 	mm->context = 0;
 
 	if (atomic_read(&mm->mm_users) == 0)
-- 
2.20.1
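
To see the scheme in isolation, here is a small stand-alone C sketch of
the two-bitmap allocator the patch introduces: one bitmap tracks in-use
context ids, a second accumulates ids freed since the last wrap, and
wrap() retires the flushed ids by XOR-ing the flushmap into the in-use
bitmap before recomputing the free range.  This is an illustration only,
not kernel code: MAX_CTX, the starting offset of 300, and the linear
find_next_* helpers are simplified stand-ins for ia64_ctx.max_ctx and
the kernel's find_next_zero_bit()/find_next_bit(), and the ia64_ctx.lock
and xchg() synchronization of the real implementation is omitted.

#include <limits.h>
#include <stdio.h>

#define MAX_CTX		((1U << 15) - 1)	/* stand-in for ia64_ctx.max_ctx */
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define NWORDS		(MAX_CTX / BITS_PER_LONG + 1)

static unsigned long bitmap[NWORDS];	/* context ids in use */
static unsigned long flushmap[NWORDS];	/* freed ids, TLB flush pending */
static unsigned int next_ctx = 300;	/* low ids reserved, as in the patch */
static unsigned int limit;

static int test_bit_(unsigned int n, const unsigned long *map)
{
	return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

static void set_bit_(unsigned int n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

/* linear stand-ins for the kernel's find_next_zero_bit()/find_next_bit() */
static unsigned int find_next_zero(const unsigned long *map,
				   unsigned int size, unsigned int from)
{
	while (from < size && test_bit_(from, map))
		from++;
	return from;
}

static unsigned int find_next_one(const unsigned long *map,
				  unsigned int size, unsigned int from)
{
	while (from < size && !test_bit_(from, map))
		from++;
	return from;
}

/* analogue of wrap_mmu_context(): retire flushed ids, find a free range */
static void wrap(void)
{
	unsigned int i;

	for (i = 0; i < NWORDS; i++) {
		bitmap[i] ^= flushmap[i];	/* freed ids become free again */
		flushmap[i] = 0;		/* the kernel uses xchg() here */
	}
	next_ctx = find_next_zero(bitmap, MAX_CTX, 300);
	limit = find_next_one(bitmap, MAX_CTX, next_ctx);
	/* the kernel performs its global TLB flush at this point */
}

/* analogue of get_mmu_context(): hand out the next free id */
static unsigned int alloc_ctx(void)
{
	if (next_ctx >= limit) {
		next_ctx = find_next_zero(bitmap, MAX_CTX, next_ctx);
		limit = find_next_one(bitmap, MAX_CTX, next_ctx);
		if (next_ctx >= MAX_CTX)
			wrap();
	}
	set_bit_(next_ctx, bitmap);
	return next_ctx++;
}

/* analogue of flush_tlb_mm(): mark an id freed but not yet flushed */
static void free_ctx(unsigned int ctx)
{
	set_bit_(ctx, flushmap);
}

int main(void)
{
	unsigned int a = alloc_ctx();
	unsigned int b = alloc_ctx();
	unsigned int c = alloc_ctx();

	printf("allocated %u %u %u\n", a, b, c);	/* 300 301 302 */
	free_ctx(b);	/* b stays unusable until the next wrap */
	wrap();		/* now b's id may be handed out again */
	printf("reallocated %u\n", alloc_ctx());	/* 301 */
	return 0;
}

Running the sketch shows that an id released by free_ctx() stays
unavailable until the next wrap(), which is when the real code does its
one global TLB flush; this is why the patch can consume the entire RID
address space before flushing.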