workqueue: deprecate system_nrt[_freezable]_wq
author	Tejun Heo <tj@kernel.org>
	Mon, 20 Aug 2012 21:51:24 +0000 (14:51 -0700)
committer	Tejun Heo <tj@kernel.org>
	Mon, 20 Aug 2012 21:51:24 +0000 (14:51 -0700)
system_nrt[_freezable]_wq are now spurious.  Mark them deprecated and
convert all users to system[_freezable]_wq.

If you're cc'd and wondering what's going on: Now all workqueues are
non-reentrant, so there's no reason to use system_nrt[_freezable]_wq.
Please use system[_freezable]_wq instead.

This patch doesn't make any functional difference.
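
For reference, the conversion is mechanical.  A hypothetical call site
(illustrative only; &dev->work and &dev->dwork are made-up work items,
not taken from this patch) changes like this:

	/* before: queue on the explicit non-reentrant workqueue */
	queue_work(system_nrt_wq, &dev->work);
	queue_delayed_work(system_nrt_wq, &dev->dwork, HZ);

	/* after: system_wq is non-reentrant too, so the shortcuts suffice */
	schedule_work(&dev->work);
	schedule_delayed_work(&dev->dwork, HZ);

schedule_work() and schedule_delayed_work() queue on system_wq, so the
two forms are equivalent.  The include/linux/workqueue.h hunk below also
marks the __system_nrt[_freezable]_wq() accessors __deprecated, so code
that still uses the old names should now get a compile-time warning.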

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-By: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Airlie <airlied@linux.ie>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
block/blk-throttle.c
block/genhd.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/hid/hid-wiimote-ext.c
drivers/mmc/core/host.c
drivers/net/virtio_net.c
include/linux/workqueue.h
kernel/srcu.c
security/keys/gc.c
security/keys/key.c

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8a31d3c4d29b1586921066032afa6..5a58e779912b909ece2ac419c3ef823e0b0f0b7b 100644
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
-                       queue_delayed_work(system_nrt_wq, dwork,
-                                          msecs_to_jiffies(10));
+                       schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
        }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
         */
        spin_lock_irqsave(&tg_stats_alloc_lock, flags);
        list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-       queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+       schedule_delayed_work(&tg_stats_alloc_work, 0);
        spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
diff --git a/block/genhd.c b/block/genhd.c
index 5d8b44a6442ba1360a3a35b63b59339e814121a2..a2f3d6a5f55c0b3c494ce1faebd481c0b38feb1c 100644
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        intv = disk_events_poll_jiffies(disk);
        set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        else if (intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
        spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1535,7 +1535,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
        spin_lock_irq(&ev->lock);
        ev->clearing |= mask;
        if (!ev->block)
-               mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+               mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
        spin_unlock_irq(&ev->lock);
 }
 
@@ -1571,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
        /* uncondtionally schedule event check and wait for it to finish */
        disk_block_events(disk);
-       queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+       queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
        flush_delayed_work(&ev->dwork);
        __disk_unblock_events(disk, false);
 
@@ -1608,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
-               queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+               queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
        spin_unlock_irq(&ev->lock);
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3252e7067d8b3ea11ecfbcf47cab922c0919bf91..8fa9d52820d90fa5927b95897e2f28f21bc9c149 100644
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
        }
 
        if (repoll)
-               queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        }
 
        if (poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
        /* kill timer and schedule immediate execution, this doesn't block */
        cancel_delayed_work(&dev->mode_config.output_poll_work);
        if (drm_kms_helper_poll)
-               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+               schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index 0a1805c9b0e52a2991afdeaa2e7b067737d1e87c..d37cd092ffc7759220f507462661e51db2b42fe7 100644
@@ -204,7 +204,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-       queue_work(system_nrt_wq, &ext->worker);
+       schedule_work(&ext->worker);
 }
 
 /*
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 597f189b44278caa4edd682ef27b6c57066eb1f2..ee2e16b170174df6e59c0d53a314d0058ddaff2a 100644
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-                               msecs_to_jiffies(host->clkgate_delay));
+               schedule_delayed_work(&host->clk_gate_work,
+                                     msecs_to_jiffies(host->clkgate_delay));
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c34c5e63045eaeb63c3e40b14ab68e7875..9650c413e11f2e6408ad93737b5ba93915e952d5 100644
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
-               queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+               schedule_delayed_work(&vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
 
        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
-                       queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        /* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 
        /* Make sure we have some buffers: if oom use wq. */
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        virtnet_napi_enable(vi);
        return 0;
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
 
-       queue_work(system_nrt_wq, &vi->config_work);
+       schedule_work(&vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
           otherwise get link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                netif_carrier_off(dev);
-               queue_work(system_nrt_wq, &vi->config_work);
+               schedule_work(&vi->config_work);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
        netif_device_attach(vi->dev);
 
        if (!try_fill_recv(vi, GFP_KERNEL))
-               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+               schedule_delayed_work(&vi->refill, 0);
 
        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a351be7c3e91cf126b9a475b9312f273da86aeff..1ce3fb08308de79edb951432604c1625efae8723 100644
@@ -310,12 +310,12 @@ extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
 
-static inline struct workqueue_struct *__system_nrt_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
 {
        return system_wq;
 }
 
-static inline struct workqueue_struct *__system_nrt_freezable_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
 {
        return system_freezable_wq;
 }
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2095be3318d519dede48a5fcfaa5b51883cfd139..97c465ebd8444cebc0f3a6e44ae0fdb04b552a2e 100644
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
-               queue_delayed_work(system_nrt_wq, &sp->work, 0);
+               schedule_delayed_work(&sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
        }
 
        if (pending)
-               queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+               schedule_delayed_work(&sp->work, SRCU_INTERVAL);
 }
 
 /*
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 61ab7c82ebb12f1470d26d6aa29519f9c33a3ce9..d67c97bb10256d5dc5a9b74b3b8aaa37022f96b1 100644
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
 
        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        } else if (gc_at < key_gc_next_run) {
                kdebug("DEFERRED");
                key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
 void key_schedule_gc_links(void)
 {
        set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 }
 
 /*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
        set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
 
        kdebug("schedule");
-       queue_work(system_nrt_wq, &key_gc_work);
+       schedule_work(&key_gc_work);
 
        kdebug("sleep");
        wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ maybe_resched:
        }
 
        if (gc_state & KEY_GC_REAP_AGAIN)
-               queue_work(system_nrt_wq, &key_gc_work);
+               schedule_work(&key_gc_work);
        kleave(" [end %x]", gc_state);
        return;
 
diff --git a/security/keys/key.c b/security/keys/key.c
index 50d96d4e06f235c3e8950255c4b8bd5fd64aa7d2..3cbe3529c418d55f6bc5080bf65a1707c6a07dc4 100644
@@ -598,7 +598,7 @@ void key_put(struct key *key)
                key_check(key);
 
                if (atomic_dec_and_test(&key->usage))
-                       queue_work(system_nrt_wq, &key_gc_work);
+                       schedule_work(&key_gc_work);
        }
 }
 EXPORT_SYMBOL(key_put);