workqueue: implement cpu intensive workqueue
author	Tejun Heo <tj@kernel.org>
Tue, 29 Jun 2010 08:07:15 +0000 (10:07 +0200)
committer	Tejun Heo <tj@kernel.org>
Tue, 29 Jun 2010 08:07:15 +0000 (10:07 +0200)
This patch implements the cpu intensive workqueue which can be
specified with the WQ_CPU_INTENSIVE flag on creation.  Works queued to
a cpu intensive workqueue don't participate in concurrency management.
IOW, they don't contribute to gcwq->nr_running and thus don't delay
execution of other works.
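
For illustration, a minimal sketch of how a driver might create and use
such a workqueue.  This assumes the alloc_workqueue() interface from
the same rework series; "crypto_wq", crypto_work_fn() and the init
function are made-up names, not part of this patch:

  #include <linux/init.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *crypto_wq;

  static void crypto_work_fn(struct work_struct *work)
  {
          /* long-running, cpu-bound computation goes here */
  }
  static DECLARE_WORK(crypto_work, crypto_work_fn);

  static int __init crypto_wq_init(void)
  {
          /* works on this wq are not counted in gcwq->nr_running */
          crypto_wq = alloc_workqueue("crypto_wq", WQ_CPU_INTENSIVE, 0);
          if (!crypto_wq)
                  return -ENOMEM;

          queue_work(crypto_wq, &crypto_work);
          return 0;
  }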

Note that although cpu intensive works won't delay other works, they
can still be delayed by other works.  Combine it with WQ_HIGHPRI to
avoid being delayed by other works as well.
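
A hedged fragment of that combination (again assuming the
alloc_workqueue() interface and a made-up queue name):

  struct workqueue_struct *wq;

  /* neither delays other works nor is delayed by them */
  wq = alloc_workqueue("isolated_wq", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0);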

As the name suggests, this is useful when using a workqueue for cpu
intensive works.  Workers executing cpu intensive works are not
considered for workqueue concurrency management and are left to the
scheduler to manage.
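
As a sketch of what such a work item might look like (hypothetical
buffer and function names), note that giving up the cpu is left to the
scheduler and to explicit cond_resched() calls rather than to the
workqueue code:

  #include <linux/sched.h>
  #include <linux/workqueue.h>

  static u8 sample_buf[1 << 20];
  static u32 sample_sum;

  /* hypothetical cpu-bound work function */
  static void checksum_work_fn(struct work_struct *work)
  {
          size_t i;
          u32 sum = 0;

          for (i = 0; i < sizeof(sample_buf); i++) {
                  sum += sample_buf[i];
                  if (!(i % 4096))
                          cond_resched();  /* let other tasks run */
          }
          sample_sum = sum;
  }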

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
include/linux/workqueue.h
kernel/workqueue.c

index 006dcf7e808ae0deb74ed8a747a2db6a9714a979..3f36d37ac5baded594762585f67807ee29d41109 100644 (file)
@@ -232,6 +232,7 @@ enum {
        WQ_NON_REENTRANT        = 1 << 2, /* guarantee non-reentrance */
        WQ_RESCUER              = 1 << 3, /* has an rescue worker */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
+       WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
 
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
index 5775717288d54c3cfc70c591948722bb82601927..6fa847c5c5e992e1461cbcd74cc95ab2331ea72d 100644 (file)
@@ -52,8 +52,10 @@ enum {
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
        WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
+       WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
 
-       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
+       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+                                 WORKER_CPU_INTENSIVE,
 
        /* gcwq->trustee_state */
        TRUSTEE_START           = 0,            /* start */
@@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
        struct hlist_head *bwh = busy_worker_head(gcwq, work);
+       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
        work_func_t f = work->func;
        int work_color;
        struct worker *collision;
@@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
                        gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
        }
 
+       /*
+        * CPU intensive works don't participate in concurrency
+        * management.  They're the scheduler's responsibility.
+        */
+       if (unlikely(cpu_intensive))
+               worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+
        spin_unlock_irq(&gcwq->lock);
 
        work_clear_pending(work);
@@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 
        spin_lock_irq(&gcwq->lock);
 
+       /* clear cpu intensive status */
+       if (unlikely(cpu_intensive))
+               worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
        /* we're done with it, release */
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
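
For context, setting WORKER_CPU_INTENSIVE is sufficient because it is
part of WORKER_NOT_RUNNING, and worker_set_flags() / worker_clr_flags()
adjust gcwq->nr_running whenever a worker's not-running state changes.
A condensed sketch of the existing setter (the wakeup branch and sanity
checks are omitted):

  static inline void worker_set_flags(struct worker *worker,
                                      unsigned int flags, bool wakeup)
  {
          struct global_cwq *gcwq = worker->gcwq;

          /* transitioning into NOT_RUNNING: leave nr_running */
          if ((flags & WORKER_NOT_RUNNING) &&
              !(worker->flags & WORKER_NOT_RUNNING)) {
                  atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

                  atomic_dec(nr_running);
          }

          worker->flags |= flags;
  }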