container_of(tx, struct shdma_desc, async_tx),
*last = desc;
struct shdma_chan *schan = to_shdma_chan(tx->chan);
- struct shdma_slave *slave = schan->slave;
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
bool power_up;
* Make it int then, on error remove chunks from the
* queue again
*/
- ops->setup_xfer(schan, slave);
+ ops->setup_xfer(schan, schan->slave_id);
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
* never runs concurrently with itself or free_chan_resources.
*/
if (slave) {
- if (slave->slave_id >= slave_num) {
+ if (slave->slave_id < 0 || slave->slave_id >= slave_num) {
ret = -EINVAL;
goto evalid;
}
goto etestused;
}
- ret = ops->set_slave(schan, slave);
+ ret = ops->set_slave(schan, slave->slave_id);
if (ret < 0)
goto esetslave;
+
+ schan->slave_id = slave->slave_id;
+ } else {
+ schan->slave_id = -EINVAL;
}
schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
goto edescalloc;
}
schan->desc_num = NR_DESCS_PER_CHANNEL;
- schan->slave = slave;
for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
desc = ops->embedded_desc(schan->desc, i);
if (!list_empty(&schan->ld_queue))
shdma_chan_ld_cleanup(schan, true);
- if (schan->slave) {
+ if (schan->slave_id >= 0) {
/* The caller is holding dma_list_mutex */
- struct shdma_slave *slave = schan->slave;
- clear_bit(slave->slave_id, shdma_slave_used);
+ clear_bit(schan->slave_id, shdma_slave_used);
chan->private = NULL;
}
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
- struct shdma_slave *slave = schan->slave;
+ int slave_id = schan->slave_id;
dma_addr_t slave_addr;
if (!chan)
BUG_ON(!schan->desc_num);
/* Someone calling slave DMA on a generic channel? */
- if (!slave || !sg_len) {
- dev_warn(schan->dev, "%s: bad parameter: %p, %d, %d\n",
- __func__, slave, sg_len, slave ? slave->slave_id : -1);
+ if (slave_id < 0 || !sg_len) {
+ dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, slave_id);
return NULL;
}
}
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- struct shdma_slave *sslave)
+ int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
- if (sslave) {
+ if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
}
static const struct sh_dmae_slave_config *dmae_find_slave(
- struct sh_dmae_chan *sh_chan, unsigned int slave_id)
+ struct sh_dmae_chan *sh_chan, int slave_id)
{
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
}
static int sh_dmae_set_slave(struct shdma_chan *schan,
- struct shdma_slave *sslave)
+ int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
-	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan,
-								  sslave->slave_id);
+	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan,
+								  slave_id);
if (!cfg)
return -ENODEV;
if (!sh_chan->shdma_chan.desc_num)
continue;
- if (sh_chan->shdma_chan.slave) {
+ if (sh_chan->shdma_chan.slave_id >= 0) {
const struct sh_dmae_slave_config *cfg = sh_chan->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
*/
struct shdma_slave {
- unsigned int slave_id;
+ int slave_id;
};
struct shdma_chan {
size_t max_xfer_len; /* max transfer length */
int id; /* Raw id of this channel */
int irq; /* Channel IRQ */
- struct shdma_slave *slave; /* Client data for slave DMA */
+ int slave_id; /* Client ID for slave DMA */
enum shdma_pm_state pm_state;
};
dma_addr_t (*slave_addr)(struct shdma_chan *);
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
- int (*set_slave)(struct shdma_chan *, struct shdma_slave *);
- void (*setup_xfer)(struct shdma_chan *, struct shdma_slave *);
+ int (*set_slave)(struct shdma_chan *, int);
+ void (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
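
For context, a minimal client-side sketch (not part of the patch; MY_TX_SLAVE_ID and the my_* names are hypothetical) of how a slave driver can still hand the integer ID to the DMA engine under this interface: the ID travels in a struct shdma_slave that a dmaengine filter attaches to chan->private, and shdma_alloc_chan_resources() copies it into schan->slave_id as in the hunks above. Real clients typically embed the shdma_slave in the controller-specific struct sh_dmae_slave from <linux/sh_dma.h>, but the base library only needs the shdma_slave part.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

#define MY_TX_SLAVE_ID	0xc1	/* hypothetical platform MID/RID code */

static struct shdma_slave my_slave = {
	.slave_id = MY_TX_SLAVE_ID,
};

static bool my_dma_filter(struct dma_chan *chan, void *arg)
{
	/*
	 * A real filter would also verify that the channel belongs to the
	 * expected DMA controller before accepting it.
	 */
	chan->private = arg;	/* read back in shdma_alloc_chan_resources() */
	return true;
}

static struct dma_chan *my_request_tx_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, my_dma_filter, &my_slave);
}

A channel allocated this way has its ID validated against slave_num and stored in schan->slave_id; a generic channel requested without a slave is left at -EINVAL, which the reworked prep_slave_sg check above rejects with the new warning.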