* Remove useless 64 bit DMA specific SCRIPTS,
* when this feature is not available.
*/
- if (!np->use_dac) {
+ if (!use_dac(np)) {
scripta0->is_dmap_dirty[0] = cpu_to_scr(SCR_NO_OP);
scripta0->is_dmap_dirty[1] = 0;
scripta0->is_dmap_dirty[2] = cpu_to_scr(SCR_NO_OP);
scripta0->is_dmap_dirty[3] = 0;
}
-/*
- * Ask/tell the system about DMA addressing.
- */
-static int sym_setup_bus_dma_mask(struct sym_hcb *np)
-{
-#if SYM_CONF_DMA_ADDRESSING_MODE > 0
-#if SYM_CONF_DMA_ADDRESSING_MODE == 1
-#define DMA_DAC_MASK DMA_40BIT_MASK
-#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
-#define DMA_DAC_MASK DMA_64BIT_MASK
-#endif
- if ((np->features & FE_DAC) &&
- !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) {
- np->use_dac = 1;
- return 0;
- }
-#endif
-
- if (!pci_set_dma_mask(np->s.device, DMA_32BIT_MASK))
- return 0;
-
- printf_warning("%s: No suitable DMA available\n", sym_name(np));
- return -1;
-}
-
/*
* Host attach and initialisations.
*
strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name));
sprintf(np->s.inst_name, "sym%d", np->s.unit);
- if (sym_setup_bus_dma_mask(np))
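+ /*
+  * Set up the DMA mask inline: prefer the wider DAC mask when the
+  * chip can do dual address cycles and 64 bit DMA is configured in,
+  * otherwise fall back to 32 bit.
+  */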
+ if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) &&
+ !pci_set_dma_mask(np->s.device, DMA_DAC_MASK)) {
+ set_dac(np);
+ } else if (pci_set_dma_mask(np->s.device, DMA_32BIT_MASK)) {
+ printf_warning("%s: No suitable DMA available\n", sym_name(np));
goto attach_failed;
+ }
/*
* Try to map the controller chip to
* 64 bit addressing (895A/896/1010) ?
*/
if (np->features & FE_DAC) {
-#if SYM_CONF_DMA_ADDRESSING_MODE == 0
- np->rv_ccntl1 |= (DDAC);
-#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
- if (!np->use_dac)
- np->rv_ccntl1 |= (DDAC);
- else
- np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
-#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
- if (!np->use_dac)
- np->rv_ccntl1 |= (DDAC);
- else
- np->rv_ccntl1 |= (0 | EXTIBMV);
-#endif
+ if (!use_dac(np))
+ np->rv_ccntl1 |= (DDAC);
+ else if (SYM_CONF_DMA_ADDRESSING_MODE == 1)
+ np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
+ else if (SYM_CONF_DMA_ADDRESSING_MODE == 2)
+ np->rv_ccntl1 |= (0 | EXTIBMV);
}
/*
{
int i;
- if (!np->use_dac)
+ if (!use_dac(np))
goto weird;
/* Look up existing mappings */
* Set up scratch C and DRS IO registers to map the 32 bit
* DMA address range our data structures are located in.
*/
- if (np->use_dac) {
+ if (use_dac(np)) {
np->dmap_bah[0] = 0; /* ??? */
OUTL(np, nc_scrx[0], np->dmap_bah[0]);
OUTL(np, nc_drs, np->dmap_bah[0]);
#endif
};
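+/*
+ *  use_dac()/set_dac() wrap the use_dac flag so that, when 64 bit DMA
+ *  addressing is configured out (mode 0), use_dac(np) is a constant 0
+ *  and the compiler can discard the DAC-only code paths.
+ */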
+#if SYM_CONF_DMA_ADDRESSING_MODE == 0
+#define use_dac(np) 0
+#define set_dac(np) do { } while (0)
+#else
+#define use_dac(np) (np)->use_dac
+#define set_dac(np) (np)->use_dac = 1
+#endif
+
#define HCB_BA(np, lbl) (np->hcb_ba + offsetof(struct sym_hcb, lbl))
*/
#if SYM_CONF_DMA_ADDRESSING_MODE == 0
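+/*
+ *  DMA_DAC_MASK is defined even for mode 0 because the attach code
+ *  tests SYM_CONF_DMA_ADDRESSING_MODE with a plain C if() rather than
+ *  #if, so the expression must compile even though the branch is dead.
+ */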
+#define DMA_DAC_MASK DMA_32BIT_MASK
#define sym_build_sge(np, data, badd, len) \
do { \
(data)->addr = cpu_to_scr(badd); \
(data)->size = cpu_to_scr(len); \
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
+#define DMA_DAC_MASK DMA_40BIT_MASK
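+/*
+ *  40 bit addressing: bits 32-39 of the bus address are carried in the
+ *  top byte of the byte count.  For example, badd = 0x2389abcdef with
+ *  len = 0x1000 yields addr = 0x89abcdef and size = 0x23001000.
+ */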
#define sym_build_sge(np, data, badd, len) \
do { \
(data)->addr = cpu_to_scr(badd); \
(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
+#define DMA_DAC_MASK DMA_64BIT_MASK
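+/*
+ *  Full 64 bit addressing: the high 32 bits of each bus address are
+ *  looked up in the dmap_bah[] table and programmed into the chip's
+ *  SCRX/DRS registers, so sym_build_sge() is an inline function here
+ *  rather than a simple macro.
+ */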
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static __inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)