workqueue: drop "std" from cpu_std_worker_pools and for_each_std_worker_pool()
Author: Tejun Heo <tj@kernel.org>
Tue, 12 Mar 2013 18:30:03 +0000 (11:30 -0700)
Committer: Tejun Heo <tj@kernel.org>
Tue, 12 Mar 2013 18:30:03 +0000 (11:30 -0700)
All per-cpu pools are standard, so there's no need to use both "cpu"
and "std", and for_each_std_worker_pool() is confusing in that it can
be used only for per-cpu pools.

* s/cpu_std_worker_pools/cpu_worker_pools/

* s/for_each_std_worker_pool()/for_each_cpu_worker_pool()/

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
kernel/workqueue.c

index 7642bb7b70eeb461d0994b096ee3c73b473d5fb1..2c5073214774a4a690a3f89202c3f82939166dbd 100644 (file)
@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
                           lockdep_is_held(&workqueue_lock),            \
                           "sched RCU or workqueue lock should be held")
 
-#define for_each_std_worker_pool(pool, cpu)                            \
-       for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0];           \
-            (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+#define for_each_cpu_worker_pool(pool, cpu)                            \
+       for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
+            (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)
 
 #define for_each_busy_worker(worker, i, pool)                          \
@@ -420,7 +420,7 @@ static bool workqueue_freezing;             /* W: have wqs started freezing? */
  * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
  */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-                                    cpu_std_worker_pools);
+                                    cpu_worker_pools);
 
 /*
  * idr of all pools.  Modifications are protected by workqueue_lock.  Read
@@ -3342,7 +3342,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
                        struct pool_workqueue *pwq =
                                per_cpu_ptr(wq->cpu_pwqs, cpu);
                        struct worker_pool *cpu_pools =
-                               per_cpu(cpu_std_worker_pools, cpu);
+                               per_cpu(cpu_worker_pools, cpu);
 
                        pwq->pool = &cpu_pools[highpri];
                        list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
@@ -3694,7 +3694,7 @@ static void wq_unbind_fn(struct work_struct *work)
        struct worker *worker;
        int i;
 
-       for_each_std_worker_pool(pool, cpu) {
+       for_each_cpu_worker_pool(pool, cpu) {
                WARN_ON_ONCE(cpu != smp_processor_id());
 
                mutex_lock(&pool->assoc_mutex);
@@ -3737,7 +3737,7 @@ static void wq_unbind_fn(struct work_struct *work)
         * unbound chain execution of pending work items if other workers
         * didn't already.
         */
-       for_each_std_worker_pool(pool, cpu)
+       for_each_cpu_worker_pool(pool, cpu)
                atomic_set(&pool->nr_running, 0);
 }
 
@@ -3754,7 +3754,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               for_each_std_worker_pool(pool, cpu) {
+               for_each_cpu_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (pool->nr_workers)
@@ -3772,7 +3772,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               for_each_std_worker_pool(pool, cpu) {
+               for_each_cpu_worker_pool(pool, cpu) {
                        mutex_lock(&pool->assoc_mutex);
                        spin_lock_irq(&pool->lock);
 
@@ -4012,7 +4012,7 @@ static int __init init_workqueues(void)
                struct worker_pool *pool;
 
                i = 0;
-               for_each_std_worker_pool(pool, cpu) {
+               for_each_cpu_worker_pool(pool, cpu) {
                        BUG_ON(init_worker_pool(pool));
                        pool->cpu = cpu;
                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
@@ -4027,7 +4027,7 @@ static int __init init_workqueues(void)
        for_each_online_cpu(cpu) {
                struct worker_pool *pool;
 
-               for_each_std_worker_pool(pool, cpu) {
+               for_each_cpu_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        pool->flags &= ~POOL_DISASSOCIATED;