From 2ba05622b8b143b0c95968ba59bddfbd6d2f2559 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Tue, 6 Jan 2009 11:38:14 -0700
Subject: [PATCH] dmaengine: provide a common 'issue_pending_all' implementation

async_tx and net_dma each have open-coded versions of issue_pending_all,
so provide a common routine in dmaengine.

The implementation needs to walk the global device list, so implement
rcu to allow dma_issue_pending_all to run lockless.  Clients protect
themselves from channel removal events by holding a dmaengine reference.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 crypto/async_tx/async_tx.c | 12 ------------
 drivers/dma/dmaengine.c    | 27 ++++++++++++++++++++++++---
 include/linux/async_tx.h   |  2 +-
 include/linux/dmaengine.h  |  1 +
 net/core/dev.c             |  9 +--------
 5 files changed, 27 insertions(+), 24 deletions(-)

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index b88bb1f608fc..2cdf7a0867b7 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,18 +45,6 @@
 static DEFINE_SPINLOCK(async_tx_lock);
 
 static LIST_HEAD(async_tx_master_list);
 
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
 static void free_dma_chan_ref(struct rcu_head *rcu)
 {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87a8cd4791ed..418eca28d472 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -70,6 +70,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -365,6 +366,26 @@
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+
+	WARN_ONCE(dmaengine_ref_count == 0,
+		  "client called %s without a reference", __func__);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node)
+			if (chan->client_count)
+				device->device_issue_pending(chan);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
 /**
  * nth_chan - returns the nth channel of the given capability
  * @cap: capability to match
@@ -490,7 +511,7 @@
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
 				/* module removed before we could use it */
-				list_del_init(&device->global_node);
+				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
@@ -635,7 +656,7 @@
 			goto err_out;
 		}
 	}
-	list_add_tail(&device->global_node, &dma_device_list);
+	list_add_tail_rcu(&device->global_node, &dma_device_list);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
@@ -677,7 +698,7 @@
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
-	list_del(&device->global_node);
+	list_del_rcu(&device->global_node);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 1c816775f135..45f6297821bd 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,7 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b466f02e2433..57a43adfc39e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -471,6 +471,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
 
 /* --- Helper iov-locking functions --- */
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da6..e40b0d57f8ff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2635,14 +2635,7 @@ out:
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 	return;
 
-- 
2.20.1
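
A note for readers following along: the patch leans on the classic RCU
publish/walk idiom, in which writers serialize list mutation behind
dma_list_mutex and use the _rcu list helpers, while readers traverse the
device list with no lock at all under rcu_read_lock().  Below is a minimal,
self-contained kernel-module sketch of that same pattern.  Every identifier
in it (demo_device, demo_list, demo_mutex, the demo_* functions) is a
hypothetical illustration, not code from the patch.

/* demo_rcu_list.c - hypothetical sketch of the mutex-writer/RCU-reader
 * list scheme used by dma_issue_pending_all(); illustration only.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_device {
	int id;
	struct list_head node;
};

static LIST_HEAD(demo_list);		/* plays the role of dma_device_list */
static DEFINE_MUTEX(demo_mutex);	/* plays the role of dma_list_mutex */
static struct demo_device *demo_dev;

/* Writer side: publish a new entry.  list_add_tail_rcu() orders the
 * entry's initialization before its insertion, so a lockless reader
 * never observes a half-initialized node.
 */
static int demo_register(int id)
{
	struct demo_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return -ENOMEM;
	dev->id = id;
	mutex_lock(&demo_mutex);
	list_add_tail_rcu(&dev->node, &demo_list);
	mutex_unlock(&demo_mutex);
	demo_dev = dev;
	return 0;
}

/* Writer side: unpublish, then wait out pre-existing readers before
 * freeing.  (The dmaengine patch avoids needing a synchronize_rcu()
 * on every removal by having clients pin devices with a reference,
 * per the changelog above.)
 */
static void demo_unregister(struct demo_device *dev)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&dev->node);
	mutex_unlock(&demo_mutex);
	synchronize_rcu();	/* all readers that could see 'dev' are done */
	kfree(dev);
}

/* Reader side: the dma_issue_pending_all() pattern -- no lock taken,
 * safe against concurrent register/unregister.
 */
static void demo_walk(void)
{
	struct demo_device *dev;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &demo_list, node)
		pr_info("demo: saw device %d\n", dev->id);
	rcu_read_unlock();
}

static int __init demo_init(void)
{
	int err = demo_register(1);

	if (err)
		return err;
	demo_walk();
	return 0;
}

static void __exit demo_exit(void)
{
	demo_unregister(demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The tradeoff is the usual RCU one: a walk racing with a registration may or
may not see the brand-new entry.  That is acceptable for an opportunistic
"kick every channel" operation like issue_pending_all, which is why the
reader side can go lockless.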