void __iomem *ath79_pll_base;
void __iomem *ath79_reset_base;
EXPORT_SYMBOL_GPL(ath79_reset_base);
-void __iomem *ath79_ddr_base;
+static void __iomem *ath79_ddr_base;
+static void __iomem *ath79_ddr_wb_flush_base;
+static void __iomem *ath79_ddr_pci_win_base;
+
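+/*
+ * Map the DDR controller registers and record where the write buffer
+ * flush and PCI window register blocks live: on AR71xx and AR934x the
+ * flush registers start at offset 0x9c and the PCI windows at 0x7c,
+ * while the other SoCs keep the flush registers at 0x7c and have no
+ * PCI window block. This is expected to run early during platform
+ * setup, before any of the helpers below are used.
+ */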
+void ath79_ddr_ctrl_init(void)
+{
+ ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE,
+ AR71XX_DDR_CTRL_SIZE);
+ if (soc_is_ar71xx() || soc_is_ar934x()) {
+ ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
+ ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
+ } else {
+ ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
+ ath79_ddr_pci_win_base = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
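+
+/*
+ * The reg argument is an index into the SoC's block of write buffer
+ * flush registers (one 32-bit register per channel), relative to
+ * ath79_ddr_wb_flush_base, not a byte offset into the DDR controller.
+ */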
void ath79_ddr_wb_flush(u32 reg)
{
- void __iomem *flush_reg = ath79_ddr_base + reg;
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
}
EXPORT_SYMBOL_GPL(ath79_ddr_wb_flush);
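+
+/*
+ * Program the eight 32-bit DDR-to-PCI window registers. Only SoCs with
+ * a PCI window block (AR71xx and AR934x, see ath79_ddr_ctrl_init())
+ * provide these registers, hence the BUG_ON below.
+ */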
+void ath79_ddr_set_pci_windows(void)
+{
+ BUG_ON(!ath79_ddr_pci_win_base);
+
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 4);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 8);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 12);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 16);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 20);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 24);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 28);
+}
+EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
+
void ath79_device_reset_set(u32 mask)
{
unsigned long flags;
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
-static void (*ath79_ip2_handler)(void);
-static void (*ath79_ip3_handler)(void);
-
static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
void __iomem *base = ath79_reset_base;
status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
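+ /*
+ * Flush channels 3 and 4 are the AR934x PCIe and WMAC write buffer
+ * flush registers, i.e. indexes 3 and 4 from the 0x9c flush base set
+ * up in ath79_ddr_ctrl_init().
+ */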
if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
- ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
+ ath79_ddr_wb_flush(3);
generic_handle_irq(ATH79_IP2_IRQ(0));
} else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
- ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
+ ath79_ddr_wb_flush(4);
generic_handle_irq(ATH79_IP2_IRQ(1));
} else {
spurious_interrupt();
irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
}
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned long pending;
-
- pending = read_c0_status() & read_c0_cause() & ST0_IM;
-
- if (pending & STATUSF_IP7)
- do_IRQ(ATH79_CPU_IRQ(7));
-
- else if (pending & STATUSF_IP2)
- ath79_ip2_handler();
-
- else if (pending & STATUSF_IP4)
- do_IRQ(ATH79_CPU_IRQ(4));
-
- else if (pending & STATUSF_IP5)
- do_IRQ(ATH79_CPU_IRQ(5));
-
- else if (pending & STATUSF_IP3)
- ath79_ip3_handler();
-
- else if (pending & STATUSF_IP6)
- do_IRQ(ATH79_CPU_IRQ(6));
-
- else
- spurious_interrupt();
-}
-
/*
* The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
* these devices typically allocate coherent DMA memory, however the
* DMA controller may still have some unsynchronized data in the FIFO.
* Issue a flush in the handlers to ensure that the driver sees
* the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer flush channels.
*/
-static void ath79_default_ip2_handler(void)
-{
- do_IRQ(ATH79_CPU_IRQ(2));
-}
-
-static void ath79_default_ip3_handler(void)
-{
- do_IRQ(ATH79_CPU_IRQ(3));
-}
-
-static void ar71xx_ip2_handler(void)
-{
- ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
- do_IRQ(ATH79_CPU_IRQ(2));
-}
-
-static void ar724x_ip2_handler(void)
-{
- ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
- do_IRQ(ATH79_CPU_IRQ(2));
-}
-
-static void ar913x_ip2_handler(void)
-{
- ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ(2));
-}
-
-static void ar933x_ip2_handler(void)
-{
- ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ(2));
-}
-
-static void ar71xx_ip3_handler(void)
-{
- ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ(3));
-}
+static unsigned irq_wb_chan[8] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
-static void ar724x_ip3_handler(void)
+asmlinkage void plat_irq_dispatch(void)
{
- ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ(3));
-}
+ unsigned long pending;
+ int irq;
-static void ar913x_ip3_handler(void)
-{
- ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ(3));
-}
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
-static void ar933x_ip3_handler(void)
-{
- ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ(3));
-}
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
-static void ar934x_ip3_handler(void)
-{
- ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ(3));
+ pending >>= CAUSEB_IP;
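+ /*
+ * Dispatch the pending lines from the highest to the lowest, flushing
+ * the matching DDR write buffer channel before handling the interrupt.
+ */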
+ while (pending) {
+ irq = fls(pending) - 1;
+ if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+ ath79_ddr_wb_flush(irq_wb_chan[irq]);
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ pending &= ~BIT(irq);
+ }
}
void __init arch_init_irq(void)
{
- if (soc_is_ar71xx()) {
- ath79_ip2_handler = ar71xx_ip2_handler;
- ath79_ip3_handler = ar71xx_ip3_handler;
- } else if (soc_is_ar724x()) {
- ath79_ip2_handler = ar724x_ip2_handler;
- ath79_ip3_handler = ar724x_ip3_handler;
- } else if (soc_is_ar913x()) {
- ath79_ip2_handler = ar913x_ip2_handler;
- ath79_ip3_handler = ar913x_ip3_handler;
- } else if (soc_is_ar933x()) {
- ath79_ip2_handler = ar933x_ip2_handler;
- ath79_ip3_handler = ar933x_ip3_handler;
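+ /*
+ * On AR71xx/AR724x/AR913x/AR933x, IP2 carries PCI(e)/WMAC traffic
+ * (flush channel 3) and IP3 carries USB (flush channel 2). On AR934x
+ * the IP2 flush is handled by the chained PCIE/WMAC dispatcher, so
+ * only the USB flush on IP3 is set here; QCA955x needs no flushing,
+ * so its entries keep the default of -1.
+ */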
+ if (soc_is_ar71xx() || soc_is_ar724x() ||
+ soc_is_ar913x() || soc_is_ar933x()) {
+ irq_wb_chan[2] = 3;
+ irq_wb_chan[3] = 2;
} else if (soc_is_ar934x()) {
- ath79_ip2_handler = ath79_default_ip2_handler;
- ath79_ip3_handler = ar934x_ip3_handler;
- } else if (soc_is_qca955x()) {
- ath79_ip2_handler = ath79_default_ip2_handler;
- ath79_ip3_handler = ath79_default_ip3_handler;
- } else {
- BUG();
+ irq_wb_chan[3] = 2;
}
mips_cpu_irq_init();