	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
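The get_device()/put_device() pair above is the usual idiom for pinning a
device while a work item is pending: queue_work() returns zero when the item
is already queued, and the extra reference taken for that attempt must then
be dropped immediately; otherwise the work function owns the reference and
releases it when it finishes. The same shape reappears for ccw devices near
the end of this patch. A minimal sketch of the idiom, with hypothetical
names (pin_and_queue() is not part of the cio code):

#include <linux/device.h>
#include <linux/workqueue.h>

/* Sketch only: pin @dev for the lifetime of a pending work item. */
static void pin_and_queue(struct device *dev, struct workqueue_struct *wq,
			  struct work_struct *work)
{
	if (!get_device(dev))		/* device is going away */
		return;
	if (!queue_work(wq, work))
		put_device(dev);	/* already queued: drop extra ref */
	/* On success, the work function must call put_device() when done. */
}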
static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
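Up to this point the driver maintained two single-threaded workqueues:
slow_path_wq ("kslowcrw") for slow-path subchannel evaluation and
ccw_device_work ("cio") for ccw device work. The patch funnels both through
one queue, cio_work_q, so the single worker thread serializes the two kinds
of work and only one queue has to be created, flushed, and destroyed. A
sketch of the lifecycle of such a queue (the my_wq names are hypothetical
stand-ins, not cio code):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Sketch only: a single-threaded queue runs its items strictly in order. */
static struct workqueue_struct *my_wq;

static int my_wq_setup(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	return my_wq ? 0 : -ENOMEM;
}

static void my_wq_teardown(void)
{
	/* destroy_workqueue() itself waits for pending items to finish. */
	destroy_workqueue(my_wq);
}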
void css_schedule_eval(struct subchannel_id schid)
{
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
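All three scheduling paths above share one shape: record what needs
evaluation in slow_subchannel_set under slow_subchannel_lock, raise
css_eval_scheduled, and kick the single slow_path_work item. Since
queue_work() is a no-op for an item that is already queued, repeated calls
simply coalesce into one pass of the evaluation worker. A condensed sketch
of that pattern (record_pending() and the other my_* names are hypothetical):

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(my_pending_lock);
static atomic_t my_eval_scheduled = ATOMIC_INIT(0);
static struct work_struct my_eval_work;		/* initialized elsewhere */
static struct workqueue_struct *my_wq;		/* created elsewhere */

static void record_pending(unsigned long id);	/* hypothetical: mark in a set */

/* Sketch only: coalesce requests into a single pending work item. */
static void my_schedule_eval(unsigned long id)
{
	unsigned long flags;

	spin_lock_irqsave(&my_pending_lock, flags);
	record_pending(id);
	atomic_set(&my_eval_scheduled, 1);
	queue_work(my_wq, &my_eval_work);	/* no-op if already queued */
	spin_unlock_irqrestore(&my_pending_lock, flags);
}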
void css_wait_for_slow_path(void)
{
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all unregistered subchannels. */
	ret = css_bus_init();
	if (ret)
		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
	ret = io_subchannel_init();
	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
}
subsys_initcall(channel_subsystem_init);
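The reworked channel_subsystem_init() uses the kernel's standard
reverse-order unwind idiom: each later failure jumps to a label that tears
down everything set up before it, so a workqueue allocation failure undoes
css_bus_init() at out_bus, and an io_subchannel_init() failure additionally
destroys the workqueue at out_wq. The shape in isolation (all setup_*/undo_*
names are hypothetical):

#include <linux/init.h>

static int setup_a(void);	/* hypothetical */
static int setup_b(void);	/* hypothetical */
static int setup_c(void);	/* hypothetical */
static void undo_a(void);	/* hypothetical */
static void undo_b(void);	/* hypothetical */

/* Sketch only: unwind earlier steps in reverse order on failure. */
static int __init example_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;	/* nothing to undo yet */
	ret = setup_b();
	if (ret)
		goto out_a;
	ret = setup_c();
	if (ret)
		goto out_b;
	return 0;
out_b:
	undo_b();
out_a:
	undo_a();
	return ret;
}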
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
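The added flush strengthens the synchronization here: wait_event() only
guarantees that css_eval_scheduled has dropped to zero, while
flush_workqueue() additionally waits until everything queued on cio_work_q
up to that point has finished executing before the drivers are settled. The
general quiesce sequence (the my_* names are hypothetical):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Sketch only: quiesce a worker before depending on its results. */
static void my_wait_idle(struct workqueue_struct *wq,
			 wait_queue_head_t *head, atomic_t *pending)
{
	wait_event(*head, atomic_read(pending) == 0);
	flush_workqueue(wq);	/* queued work has fully run after this */
}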
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;
{
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
-	flush_workqueue(ccw_device_work);
+	flush_workqueue(cio_work_q);
}
static struct css_driver io_subchannel_driver = {
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);
-	ccw_device_work = create_singlethread_workqueue("cio");
-	if (!ccw_device_work)
-		return -ENOMEM;
-	slow_path_wq = create_singlethread_workqueue("kslowcrw");
-	if (!slow_path_wq) {
-		ret = -ENOMEM;
-		goto out_err;
-	}
-	if ((ret = bus_register (&ccw_bus_type)))
-		goto out_err;
-
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
-		goto out_err;
+		bus_unregister(&ccw_bus_type);
-	return 0;
-out_err:
-	if (ccw_device_work)
-		destroy_workqueue(ccw_device_work);
-	if (slow_path_wq)
-		destroy_workqueue(slow_path_wq);
	return ret;
}
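With the workqueue creation hoisted into channel_subsystem_init(),
io_subchannel_init() is left with exactly one earlier step to undo, so the
out_err label and its conditional destroy_workqueue() calls collapse into a
single bus_unregister(). The final return ret; covers both outcomes, since
ret is zero when css_driver_register() succeeds. The degenerate two-step
form (the register_*/unregister_* names are hypothetical):

#include <linux/init.h>

static int register_first(void);	/* hypothetical */
static int register_second(void);	/* hypothetical */
static void unregister_first(void);	/* hypothetical */

/* Sketch only: a two-step init needs a single unwind on failure. */
static int __init two_step_init(void)
{
	int ret;

	ret = register_first();
	if (ret)
		return ret;
	ret = register_second();
	if (ret)
		unregister_first();
	return ret;	/* 0 on success, error code otherwise */
}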
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
-	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);