[<ffffffff817db154>] kernel_thread_helper+0x4/0x10
[<ffffffff81066430>] ? finish_task_switch+0x80/0x110
[<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
- [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
[<ffffffff817db150>] ? gs_change+0xb/0xb
Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
*/
smp_mb();
if (atomic_dec_if_positive(&ps->pending) > 0)
- queue_kthread_work(&pit->worker, &pit->expired);
+ kthread_queue_work(&pit->worker, &pit->expired);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_pit *pit)
{
hrtimer_cancel(&pit->pit_state.timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
}
static void pit_do_work(struct kthread_work *work)
if (atomic_read(&ps->reinject))
atomic_inc(&ps->pending);
- queue_kthread_work(&pt->worker, &pt->expired);
+ kthread_queue_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
/* TODO The new value only affected after the retriggered */
hrtimer_cancel(&ps->timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
ps->period = interval;
ps->is_periodic = is_period;
pid_nr = pid_vnr(pid);
put_pid(pid);
- init_kthread_worker(&pit->worker);
+ kthread_init_worker(&pit->worker);
pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
"kvm-pit/%d", pid_nr);
if (IS_ERR(pit->worker_task))
goto fail_kthread;
- init_kthread_work(&pit->expired, pit_do_work);
+ kthread_init_work(&pit->expired, pit_do_work);
pit->kvm = kvm;
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
kvm_pit_set_reinject(pit, false);
hrtimer_cancel(&pit->pit_state.timer);
- flush_kthread_work(&pit->expired);
+ kthread_flush_work(&pit->expired);
kthread_stop(pit->worker_task);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
/* If another context is idling then defer */
if (engine->idling) {
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
goto out;
}
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&engine->kworker,
+ kthread_queue_work(&engine->kworker,
&engine->pump_requests);
goto out;
}
ret = ablkcipher_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
ret = ahash_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
req->base.complete(&req->base, err);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
req->base.complete(&req->base, err);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
- queue_kthread_work(&engine->kworker, &engine->pump_requests);
+ kthread_queue_work(&engine->kworker, &engine->pump_requests);
return 0;
}
crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
spin_lock_init(&engine->queue_lock);
- init_kthread_worker(&engine->kworker);
+ kthread_init_worker(&engine->kworker);
engine->kworker_task = kthread_run(kthread_worker_fn,
&engine->kworker, "%s",
engine->name);
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
- init_kthread_work(&engine->pump_requests, crypto_pump_work);
+ kthread_init_work(&engine->pump_requests, crypto_pump_work);
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
if (ret)
return ret;
- flush_kthread_worker(&engine->kworker);
+ kthread_flush_worker(&engine->kworker);
kthread_stop(engine->kworker_task);
return 0;
static void loop_unprepare_queue(struct loop_device *lo)
{
- flush_kthread_worker(&lo->worker);
+ kthread_flush_worker(&lo->worker);
kthread_stop(lo->worker_task);
}
static int loop_prepare_queue(struct loop_device *lo)
{
- init_kthread_worker(&lo->worker);
+ kthread_init_worker(&lo->worker);
lo->worker_task = kthread_run(kthread_worker_fn,
&lo->worker, "loop%d", lo->lo_number);
if (IS_ERR(lo->worker_task))
break;
}
- queue_kthread_work(&lo->worker, &cmd->work);
+ kthread_queue_work(&lo->worker, &cmd->work);
return BLK_MQ_RQ_QUEUE_OK;
}
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->rq = rq;
- init_kthread_work(&cmd->work, loop_queue_work);
+ kthread_init_work(&cmd->work, loop_queue_work);
return 0;
}
if (likely(worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
+ kthread_queue_work(worker, &cq->comptask);
}
}
cq->ibcq.cqe = entries;
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
+ kthread_init_work(&cq->comptask, send_complete);
cq->queue = wc;
ret = &cq->ibcq;
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
- flush_kthread_work(&cq->comptask);
+ kthread_flush_work(&cq->comptask);
spin_lock(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
spin_unlock(&rdi->n_cqs_lock);
rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
if (!rdi->worker)
return -ENOMEM;
- init_kthread_worker(rdi->worker);
+ kthread_init_worker(rdi->worker);
task = kthread_create_on_node(
kthread_worker_fn,
rdi->worker,
/* blocks future queuing from send_complete() */
rdi->worker = NULL;
smp_wmb(); /* See rdi_cq_enter */
- flush_kthread_worker(worker);
+ kthread_flush_worker(worker);
kthread_stop(worker->task);
kfree(worker);
}
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
if (md->kworker_task)
- init_kthread_work(&tio->work, map_tio_request);
+ kthread_init_work(&tio->work, map_tio_request);
}
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
+ kthread_queue_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
}
}
blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
+ kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
if (IS_ERR(md->kworker_task))
spin_unlock_irq(q->queue_lock);
if (dm_request_based(md) && md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
/*
* Take suspend_lock so that presuspend and postsuspend methods
if (dm_request_based(md)) {
dm_stop_queue(md->queue);
if (md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
}
flush_workqueue(md->wq);
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
- init_kthread_worker(&itv->irq_worker);
+ kthread_init_worker(&itv->irq_worker);
itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
"%s", itv->v4l2_dev.name);
if (IS_ERR(itv->irq_worker_task)) {
/* must use the FIFO scheduler as it is realtime sensitive */
sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
- init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+ kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
del_timer_sync(&itv->dma_timer);
/* Kill irq worker */
- flush_kthread_worker(&itv->irq_worker);
+ kthread_flush_worker(&itv->irq_worker);
kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv);
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
- queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+ kthread_queue_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
}
if (oldfilter != priv->rxfilter)
- queue_kthread_work(&priv->kworker, &priv->setrx_work);
+ kthread_queue_work(&priv->kworker, &priv->setrx_work);
}
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
- queue_kthread_work(&priv->kworker, &priv->tx_work);
+ kthread_queue_work(&priv->kworker, &priv->tx_work);
return NETDEV_TX_OK;
}
goto out_free;
}
- init_kthread_worker(&priv->kworker);
- init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
- init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+ kthread_init_worker(&priv->kworker);
+ kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+ kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
"encx24j600");
/* If another context is idling the device then defer */
if (master->idling) {
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&master->kworker,
+ kthread_queue_work(&master->kworker,
&master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
master->running = false;
master->busy = false;
- init_kthread_worker(&master->kworker);
+ kthread_init_worker(&master->kworker);
master->kworker_task = kthread_run(kthread_worker_fn,
&master->kworker, "%s",
dev_name(&master->dev));
dev_err(&master->dev, "failed to create message pump task\n");
return PTR_ERR(master->kworker_task);
}
- init_kthread_work(&master->pump_messages, spi_pump_messages);
+ kthread_init_work(&master->pump_messages, spi_pump_messages);
/*
* Master config will indicate if this controller should run the
spin_lock_irqsave(&master->queue_lock, flags);
master->cur_msg = NULL;
master->cur_msg_prepared = false;
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
trace_spi_message_done(mesg);
master->cur_msg = NULL;
spin_unlock_irqrestore(&master->queue_lock, flags);
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
return 0;
}
ret = spi_stop_queue(master);
/*
- * flush_kthread_worker will block until all work is done.
+ * kthread_flush_worker will block until all work is done.
* If the reason that stop_queue timed out is that the work will never
* finish, then it does no good to call flush/stop thread, so
* return anyway.
return ret;
}
- flush_kthread_worker(&master->kworker);
+ kthread_flush_worker(&master->kworker);
kthread_stop(master->kworker_task);
return 0;
list_add_tail(&msg->queue, &master->queue);
if (!master->busy && need_pump)
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
- queue_kthread_work(&s->kworker, &s->irq_work);
+ kthread_queue_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_clear |= bit;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_stop_tx(struct uart_port *port)
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
- queue_kthread_work(&s->kworker, &one->tx_work);
+ kthread_queue_work(&s->kworker, &one->tx_work);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
one->config.flags |= SC16IS7XX_RECONF_MD;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
return 0;
}
sc16is7xx_power(port, 0);
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
}
static const char *sc16is7xx_type(struct uart_port *port)
s->devtype = devtype;
dev_set_drvdata(dev, s);
- init_kthread_worker(&s->kworker);
- init_kthread_work(&s->irq_work, sc16is7xx_ist);
+ kthread_init_worker(&s->kworker);
+ kthread_init_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
/* Initialize kthread work structs */
- init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
- init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
sc16is7xx_power(&s->p[i].port, 0);
}
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
if (!IS_ERR(s->clk))
* Simple work processor based on kthread.
*
* This provides easier way to make use of kthreads. A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using kthread_queue_work() or kthread_flush_work()
* respectively. Queued kthread_works are processed by a kthread
* running kthread_worker_fn().
*/
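A minimal usage sketch of the renamed API, following the lifecycle the conversions above all share (init worker, run kthread_worker_fn, init and queue work, flush, stop); the my_* names are illustrative and not taken from any driver in this patch:

#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker my_worker;
static struct kthread_work my_work;
static struct task_struct *my_task;

static void my_work_fn(struct kthread_work *work)
{
	/* runs in my_worker's thread context */
}

static int my_init(void)
{
	kthread_init_worker(&my_worker);
	my_task = kthread_run(kthread_worker_fn, &my_worker, "my-worker");
	if (IS_ERR(my_task))
		return PTR_ERR(my_task);
	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(&my_worker, &my_work);
	return 0;
}

static void my_exit(void)
{
	kthread_flush_worker(&my_worker);	/* drain all queued works */
	kthread_stop(my_task);			/* then stop the worker thread */
}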
*/
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ init_kthread_worker(&worker); worker; })
+ ({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
-#define init_kthread_worker(worker) \
+#define kthread_init_worker(worker) \
do { \
static struct lock_class_key __key; \
- __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+ __kthread_init_worker((worker), "("#worker")->lock", &__key); \
} while (0)
-#define init_kthread_work(work, fn) \
+#define kthread_init_work(work, fn) \
do { \
memset((work), 0, sizeof(struct kthread_work)); \
INIT_LIST_HEAD(&(work)->node); \
int kthread_worker_fn(void *worker_ptr);
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
#endif /* _LINUX_KTHREAD_H */
return 0;
}
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
const char *name,
struct lock_class_key *key)
{
INIT_LIST_HEAD(&worker->work_list);
worker->task = NULL;
}
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
/**
* kthread_worker_fn - kthread function to process kthread_worker
EXPORT_SYMBOL_GPL(kthread_worker_fn);
/* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
+static void kthread_insert_work(struct kthread_worker *worker,
struct kthread_work *work,
struct list_head *pos)
{
}
/**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
* @worker: target kthread_worker
* @work: kthread_work to queue
*
* Queue @work to work processor @task for async execution. @task
* must have been created with kthread_worker_create(). Returns %true
* if @work was successfully queued, %false if it was already pending.
*/
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work)
{
bool ret = false;
spin_lock_irqsave(&worker->lock, flags);
if (list_empty(&work->node)) {
- insert_kthread_work(worker, work, &worker->work_list);
+ kthread_insert_work(worker, work, &worker->work_list);
ret = true;
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_work);
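Because kthread_queue_work() returns %false while @work is still pending, callers can treat repeated triggers as coalesced rather than queued twice; a hedged sketch (kick_worker is an illustrative name, not an API in this patch):

#include <linux/kthread.h>

static void kick_worker(struct kthread_worker *worker,
			struct kthread_work *work)
{
	/* A second queue attempt while @work is pending is a no-op. */
	if (!kthread_queue_work(worker, work))
		pr_debug("work already pending, trigger coalesced\n");
}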
struct kthread_flush_work {
struct kthread_work work;
}
/**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
* @work: work to flush
*
* If @work is queued or executing, wait for it to finish execution.
*/
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
}
if (!list_empty(&work->node))
- insert_kthread_work(worker, &fwork.work, work->node.next);
+ kthread_insert_work(worker, &fwork.work, work->node.next);
else if (worker->current_work == work)
- insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+ kthread_insert_work(worker, &fwork.work,
+ worker->work_list.next);
else
noop = true;
if (!noop)
wait_for_completion(&fwork.done);
}
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
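Note that kthread_flush_work() only waits for @work; it does not prevent new submissions, which is why the KVM PIT hunks above cancel the hrtimer before flushing. An illustrative ordering under that assumption (struct my_dev is hypothetical):

#include <linux/hrtimer.h>
#include <linux/kthread.h>

struct my_dev {
	struct hrtimer timer;
	struct kthread_work expired;
};

static void my_dev_quiesce(struct my_dev *dev)
{
	hrtimer_cancel(&dev->timer);		/* stop the producer first */
	kthread_flush_work(&dev->expired);	/* wait out a queued/running handler */
}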
/**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
* @worker: worker to flush
*
* Wait until all currently executing or pending works on @worker are
* finished.
*/
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
{
struct kthread_flush_work fwork = {
KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
COMPLETION_INITIALIZER_ONSTACK(fwork.done),
};
- queue_kthread_work(worker, &fwork.work);
+ kthread_queue_work(worker, &fwork.work);
wait_for_completion(&fwork.done);
}
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
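Whole-worker shutdown follows the flush-then-stop order seen in the loop, spi, and sc16is7xx conversions above: queuing anything after kthread_stop() would race with the dying thread. A sketch of that teardown pattern (struct my_ctx is illustrative):

#include <linux/kthread.h>

struct my_ctx {
	struct kthread_worker kworker;
	struct task_struct *kworker_task;
};

static void my_ctx_teardown(struct my_ctx *ctx)
{
	kthread_flush_worker(&ctx->kworker);	/* complete every queued work */
	kthread_stop(ctx->kworker_task);	/* only then stop the worker thread */
}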
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
list_add_tail(&msg->list, &ipc->tx_list);
spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
if (wait)
return tx_wait_done(ipc, msg, rx_data);
return -ENOMEM;
/* start the IPC message thread */
- init_kthread_worker(&ipc->kworker);
+ kthread_init_worker(&ipc->kworker);
ipc->tx_thread = kthread_run(kthread_worker_fn,
&ipc->kworker, "%s",
dev_name(ipc->dev));
return ret;
}
- init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+ kthread_init_work(&ipc->kwork, ipc_tx_msgs);
return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
spin_unlock_irqrestore(&sst->spinlock, flags);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}
skl_ipc_int_enable(dsp);
/* continue to send any remaining messages... */
- queue_kthread_work(&ipc->kworker, &ipc->kwork);
+ kthread_queue_work(&ipc->kworker, &ipc->kwork);
return IRQ_HANDLED;
}