core: Replace __get_cpu_var with __this_cpu_read if not used for an address.
author		Christoph Lameter <cl@linux.com>	Wed, 8 Dec 2010 15:22:55 +0000 (16:22 +0100)
committer	Tejun Heo <tj@kernel.org>		Fri, 17 Dec 2010 14:07:19 +0000 (15:07 +0100)
__get_cpu_var() can be replaced with __this_cpu_read(), which uses a
single read instruction with an implied address calculation to access
the correct per-cpu instance.

However, the address of a per-cpu variable passed to __this_cpu_read()
cannot be taken (the access is an implied address calculation through
segment prefixes).  Therefore apply this conversion only to those uses of
__get_cpu_var() where the address of the variable is not needed.
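
A minimal sketch of the conversion pattern (not part of the patch; demo_count
is a hypothetical per-cpu counter used only for illustration):

	#include <linux/percpu.h>

	/* Hypothetical example, not from this series. */
	static DEFINE_PER_CPU(unsigned long, demo_count);

	static void demo_tick(void)
	{
		/* Old style: __get_cpu_var() yields an lvalue, so an address is formed. */
		__get_cpu_var(demo_count)++;

		/* New style: a single per-cpu increment, no address taken. */
		__this_cpu_inc(demo_count);
	}

	static unsigned long *demo_address(void)
	{
		/* Still needs __get_cpu_var(): the address itself is required. */
		return &__get_cpu_var(demo_count);
	}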

Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hughd@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
13 files changed:
include/asm-generic/irq_regs.h
include/linux/elevator.h
include/linux/kernel_stat.h
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/printk.c
kernel/rcutree.c
kernel/softirq.c
kernel/time/tick-common.c
kernel/time/tick-oneshot.c
kernel/watchdog.c
mm/slab.c

index 5ae1d07d4a1275a8ac91996b23753270e5033cb3..6bf9355fa7eb5097c59454ac8d98e46e09c1f621 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-       return __get_cpu_var(__irq_regs);
+       return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-       struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+       struct pt_regs *old_regs;
 
-       old_regs = *pp_regs;
-       *pp_regs = new_regs;
+       old_regs = __this_cpu_read(__irq_regs);
+       __this_cpu_write(__irq_regs, new_regs);
        return old_regs;
 }
 
index 4fd978e7eb83ef8d689d0d313b5441b0631ea275..4d857973d2c94317cf11041a4a7070794fc13a99 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)                         \
-       do {                                                    \
-               preempt_disable();                              \
-               __get_cpu_var(name) += (__val);                 \
-               preempt_enable();                               \
-       } while (0)
-
-#define elv_ioc_count_inc(name)        elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)        elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)        this_cpu_inc(name)
+#define elv_ioc_count_dec(name)        this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)                               \
 ({                                                             \
index ad54c846911b91a169b903f7b1f6fee24d4320a2..44e83ba12b5b1076e4a1af7624420a9c2762ed2f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-       (kstat_this_cpu.irqs[irq])
+       (this_cpu_read(kstat.irqs[irq]))
 
 struct irq_desc;
 
index 676149a4ac5ff497367a484e2b66c01e915ccefd..89c74861a3da94ea6b21720b40f59ce69a42b918 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
-               __get_cpu_var(process_counts)--;
+               __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
 }
index 3b159c5991b7561bdba253eeb479f91622a35fb9..e05e27de67df20749248bd869396b1298fed40cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1282,7 +1282,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
-                       __get_cpu_var(process_counts)++;
+                       __this_cpu_inc(process_counts);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
index 72206cf5c6cf854898d889a6a645e44febdd526f..29de5ae4ca956b7f06c0ca56c968493fb22fa316 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-       return __get_cpu_var(hrtimer_bases).hres_active;
+       return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
index 9a2264fc42cafac30d9264d569e4e673e9617f19..b032317f9964675c9d7c96b75a4b346f0e849ccc 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1074,8 +1074,8 @@ static DEFINE_PER_CPU(int, printk_pending);
 
 void printk_tick(void)
 {
-       if (__get_cpu_var(printk_pending)) {
-               __get_cpu_var(printk_pending) = 0;
+       if (__this_cpu_read(printk_pending)) {
+               __this_cpu_write(printk_pending, 0);
                wake_up_interruptible(&log_wait);
        }
 }
index ccdc04c479815addc8dbacea69643174a4636670..aeebf772d6a2567dbf70a20eae34eeedde006f6b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -367,8 +367,8 @@ void rcu_irq_exit(void)
        WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
        /* If the interrupt queued a callback, get out of dyntick mode. */
-       if (__get_cpu_var(rcu_sched_data).nxtlist ||
-           __get_cpu_var(rcu_bh_data).nxtlist)
+       if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+           __this_cpu_read(rcu_bh_data.nxtlist))
                set_need_resched();
 }
 
index 18f4be0d5fe0bbf853935972d9b441e95bc61c5a..d0a0dda52c1aa574db9bc742b4574a9960aabe8a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
        /* Interrupts are disabled: no need to stop preemption */
-       struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+       struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_vec).tail = t;
-       __get_cpu_var(tasklet_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_vec.tail) = t;
+       __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_hi_vec).tail = t;
-       __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_hi_vec.tail) = t;
+       __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
        BUG_ON(!irqs_disabled());
 
-       t->next = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = t;
+       t->next = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_vec).head;
-       __get_cpu_var(tasklet_vec).head = NULL;
-       __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+       list = __this_cpu_read(tasklet_vec.head);
+       __this_cpu_write(tasklet_vec.head, NULL);
+       __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_vec).tail = t;
-               __get_cpu_var(tasklet_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_vec.tail) = t;
+               __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = NULL;
-       __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+       list = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, NULL);
+       __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_hi_vec).tail = t;
-               __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_hi_vec.tail) = t;
+               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-               *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-               __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+               *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+               this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-               *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-               __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+               *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+               __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
index b6b898d2eeefc1b0627c613b3d23a9bd4b21876e..051bc80a0c435cf47a8dfb8a0f5d2d49d20e188c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
index aada0e52680ace6cc7d5e09a111a3879c1c6bda3..5cbc101f908b8483938c0153fc1ac023bcd1c784 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
        int ret;
 
        local_irq_save(flags);
-       ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+       ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
        local_irq_restore(flags);
 
        return ret;
index 6e3c41a4024c1cc66be01218e2c37498498f2469..8037a86106ed5b071bd51711920bc393f10edbb2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
 {
        int this_cpu = smp_processor_id();
 
-       __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+       __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-       __raw_get_cpu_var(watchdog_touch_ts) = 0;
+       __this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-       unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+       unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-       if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+       if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;
 
-       __get_cpu_var(hrtimer_interrupts_saved) = hrint;
+       __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
 }
 #endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
-       if (__get_cpu_var(watchdog_nmi_touch) == true) {
-               __get_cpu_var(watchdog_nmi_touch) = false;
+       if (__this_cpu_read(watchdog_nmi_touch) == true) {
+               __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }
 
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                int this_cpu = smp_processor_id();
 
                /* only print hardlockups once */
-               if (__get_cpu_var(hard_watchdog_warn) == true)
+               if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
                if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-               __get_cpu_var(hard_watchdog_warn) = true;
+               __this_cpu_write(hard_watchdog_warn, true);
                return;
        }
 
-       __get_cpu_var(hard_watchdog_warn) = false;
+       __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
 static void watchdog_interrupt_count(void)
 {
-       __get_cpu_var(hrtimer_interrupts)++;
+       __this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-       unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+       unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
 
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        watchdog_interrupt_count();
 
        /* kick the softlockup detector */
-       wake_up_process(__get_cpu_var(softlockup_watchdog));
+       wake_up_process(__this_cpu_read(softlockup_watchdog));
 
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
        if (touch_ts == 0) {
-               if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+               if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
-                       __get_cpu_var(softlockup_touch_sync) = false;
+                       __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
-               if (__get_cpu_var(soft_watchdog_warn) == true)
+               if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-               __get_cpu_var(soft_watchdog_warn) = true;
+               __this_cpu_write(soft_watchdog_warn, true);
        } else
-               __get_cpu_var(soft_watchdog_warn) = false;
+               __this_cpu_write(soft_watchdog_warn, false);
 
        return HRTIMER_RESTART;
 }
index b1e40dafbab3cc6326a6913acf17155cbcd8e7f0..316d75596f3cb85ee3c31c0f54b0062e5d3f000d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        node = next_node(node, node_online_map);
        if (unlikely(node >= MAX_NUMNODES))
                node = first_node(node_online_map);
-       __get_cpu_var(slab_reap_node) = node;
+       __this_cpu_write(slab_reap_node, node);
 }
 
 #else
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];