From 3cbc564024d8f174202f023e8a2991782f6a9431 Mon Sep 17 00:00:00 2001
From: Ravikiran G Thirumalai
Date: Fri, 23 Jun 2006 02:05:40 -0700
Subject: [PATCH] [PATCH] percpu_counters: create lib/percpu_counter.c

- Move percpu_counter routines from mm/swap.c to lib/percpu_counter.c

Signed-off-by: Ravikiran Thirumalai
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 lib/Makefile         |  1 +
 lib/percpu_counter.c | 46 ++++++++++++++++++++++++++++++++++++++++++++
 mm/swap.c            | 42 ----------------------------------------
 3 files changed, 47 insertions(+), 42 deletions(-)
 create mode 100644 lib/percpu_counter.c

diff --git a/lib/Makefile b/lib/Makefile
index b830c9a1554..79358ad1f11 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 00000000000..7a87003f8e8
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	long count;
+	long *pcount;
+	int cpu = get_cpu();
+
+	pcount = per_cpu_ptr(fbc->counters, cpu);
+	count = *pcount + amount;
+	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+		spin_lock(&fbc->lock);
+		fbc->count += count;
+		*pcount = 0;
+		spin_unlock(&fbc->lock);
+	} else {
+		*pcount = count;
+	}
+	put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_possible_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
diff --git a/mm/swap.c b/mm/swap.c
index 88895c249bc..03ae2076f92 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -480,48 +480,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-	long count;
-	long *pcount;
-	int cpu = get_cpu();
-
-	pcount = per_cpu_ptr(fbc->counters, cpu);
-	count = *pcount + amount;
-	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-		spin_lock(&fbc->lock);
-		fbc->count += count;
-		*pcount = 0;
-		spin_unlock(&fbc->lock);
-	} else {
-		*pcount = count;
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
-	long ret;
-	int cpu;
-
-	spin_lock(&fbc->lock);
-	ret = fbc->count;
-	for_each_possible_cpu(cpu) {
-		long *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	spin_unlock(&fbc->lock);
-	return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
 /*
  * Perform any setup for the swap system
  */
-- 
2.20.1
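
Note (not part of the patch): a minimal sketch of how a caller might use the interface being moved. Only percpu_counter_mod() and percpu_counter_sum() are confirmed by the diff above; the init/destroy helper, the one-argument percpu_counter_init() signature, and the nr_widgets example are assumptions about the era's <linux/percpu_counter.h>.

/* Hypothetical kernel-side caller, SMP build assumed. */
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static int __init widgets_init(void)
{
	/* Assumed era helper: sets up the spinlock, count and per-cpu storage. */
	percpu_counter_init(&nr_widgets);
	return 0;
}

static void widget_created(void)
{
	/* Cheap per-cpu update; folded into nr_widgets.count once the
	 * local delta reaches +/-FBC_BATCH. */
	percpu_counter_mod(&nr_widgets, 1);
}

static long widgets_total(void)
{
	/* Slower but accurate: adds every CPU's local delta under the lock. */
	return percpu_counter_sum(&nr_widgets);
}

The FBC_BATCH threshold is the design point here: most updates touch only a per-cpu slot, so the shared fbc->lock and fbc->count cache line are bounced only once per batch rather than on every increment.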