/* Assign HW handshaking interface (x) to destination / source peripheral */
#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
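+/* Assign a priority level to a DMA channel (CFG register) */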
+#define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
/*
u32 dma_interrupt_count;
struct ahb_dma_regs *sata_dma_regs;
struct device *dwc_dev;
+ int dma_channel;
};
struct sata_dwc_host_priv host_pvt;
/*
*/
static int dma_request_channel(void)
{
- int i;
-
- for (i = 0; i < DMA_NUM_CHANS; i++) {
- if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &\
- DMA_CHANNEL(i)))
- return i;
- }
- dev_err(host_pvt.dwc_dev, "%s NO channel chan_en: 0x%08x\n", __func__,
- in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)));
+ /* Use the configured channel only if it is not already in use */
+ if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
+ DMA_CHANNEL(host_pvt.dma_channel)))
+ return host_pvt.dma_channel;
+ dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
+ __func__, host_pvt.dma_channel);
return -1;
}
dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
- for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
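+ /* Check only the DMA channel assigned to this controller */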
+ chan = host_pvt.dma_channel;
+ if (chan >= 0) {
/* Check for end-of-transfer interrupt. */
if (tfr_reg & DMA_CHANNEL(chan)) {
/*
static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
{
int retval = 0;
- int chan;
+ int chan = host_pvt.dma_channel;
- for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ if (chan >= 0) {
/* Unmask error interrupt */
out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
DMA_ENABLE_CHAN(chan));
int fis_len = 0;
dma_addr_t next_llp;
int bl;
+ int sms_val, dms_val;
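+ /* AHB master selection for the LLI entries below: the SATA controller
+ * side stays on master 0, the memory side uses master (1 + channel) */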
+ sms_val = 0;
+ dms_val = 1 + host_pvt.dma_channel;
dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
" dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
(u32)dmadr_addr);
lli[idx].ctl.low = cpu_to_le32(
DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
- DMA_CTL_SMS(0) |
- DMA_CTL_DMS(1) |
+ DMA_CTL_SMS(sms_val) |
+ DMA_CTL_DMS(dms_val) |
DMA_CTL_SRC_MSIZE(bl) |
DMA_CTL_DST_MSIZE(bl) |
DMA_CTL_SINC_NOCHANGE |
lli[idx].ctl.low = cpu_to_le32(
DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
- DMA_CTL_SMS(1) |
- DMA_CTL_DMS(0) |
+ DMA_CTL_SMS(dms_val) |
+ DMA_CTL_DMS(sms_val) |
DMA_CTL_SRC_MSIZE(bl) |
DMA_CTL_DST_MSIZE(bl) |
DMA_CTL_DINC_NOCHANGE |
/* Program the CFG register. */
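+ /* The channel number doubles as the HW handshaking interface number
+ * (source and destination) and as the channel priority level */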
out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
+ DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
- out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
+ out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
+ DMA_CFG_HW_CH_PRIOR(dma_ch));
/* Program the address of the linked list */
out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct device_node *np = ofdev->dev.of_node;
+ u32 dma_chan;
/* Allocate DWC SATA device */
hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
goto error;
}
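+ /* Read the DMA channel to use from the device tree; fall back to
+ * channel 0 if the property is missing */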
+ if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
+ dev_warn(&ofdev->dev, "no dma-channel property set,"
+ " using channel 0\n");
+ dma_chan = 0;
+ }
+ host_pvt.dma_channel = dma_chan;
+
/* Ioremap SATA registers */
base = of_iomap(ofdev->dev.of_node, 0);
if (!base) {