#define FEC_QUIRK_USE_GASKET (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extended buffer descriptors */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
static struct platform_device_id fec_devtype[] = {
{
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX,
}, {
/* sentinel */
}
#endif
#endif /* CONFIG_M5272 */
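+/* Size the ring check for the larger extended descriptor:
+ * sizeof(struct bufdesc_ex) is 32 bytes, vs. 8 for struct bufdesc.
+ */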
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
static int mii_cnt;
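+/* Walk the descriptor ring: when extended descriptors are in use the
+ * entries are struct bufdesc_ex, so advancing or rewinding a ring
+ * pointer must step by the larger stride.
+ */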
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+
+ if (is_ex)
+ return (struct bufdesc *)(ex + 1);
+ else
+ return bdp + 1;
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+
+ if (is_ex)
+ return (struct bufdesc *)(ex - 1);
+ else
+ return bdp - 1;
+}
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
*/
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
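+ /* Position of this descriptor in the TX ring; the pointer
+ * arithmetic must match the descriptor size in use.
+ */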
- index = bdp - fep->tx_bd_base;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_bdu = 0;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
fep->hwts_tx_en)) {
- bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
+ } else {
- bdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
+ }
}
-#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
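+ /* The TX ring follows the RX ring in the same DMA block, so the
+ * offset scales with the size of the descriptor format in use.
+ */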
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
+ if (fep->bufdesc_ex)
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ else
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
-#ifdef CONFIG_FEC_PTP
- ecntl |= (1 << 4);
-#endif
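+ /* ECR bit 4 is EN1588, which selects the enhanced (extended
+ * descriptor) mode needed for IEEE 1588 timestamping.
+ */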
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-#ifdef CONFIG_FEC_PTP
- fec_ptp_start_cyclecounter(ndev);
-#endif
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
ndev->stats.tx_packets++;
}
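+ /* Hardware TX timestamps live in the extended descriptor's ts
+ * field, so they can only be reported when bufdesc_ex is set.
+ */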
-#ifdef CONFIG_FEC_PTP
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps.hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
+
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Since we have freed up a buffer, the ring is no longer full
*/
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
-#ifdef CONFIG_FEC_PTP
+
/* Attach the hardware receive timestamp to the skb */
- if (fep->hwts_rx_en) {
+ if (fep->hwts_rx_en && fep->bufdesc_ex) {
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
unsigned long flags;
+ struct bufdesc_ex *ebdp =
+ (struct bufdesc_ex *)bdp;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps->hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-#endif
+
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
}
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
- bdp->cbd_prot = 0;
- bdp->cbd_bdu = 0;
-#endif
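+ /* Extended descriptors carry extra status words that must be
+ * reset before the descriptor is handed back to the hardware.
+ */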
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
if (!phydev)
return -ENODEV;
-#ifdef CONFIG_FEC_PTP
- if (cmd == SIOCSHWTSTAMP)
+ if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
return fec_ptp_ioctl(ndev, rq, cmd);
-#endif
+
return phy_mii_ioctl(phydev, rq, cmd);
}
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
bdp = fep->tx_bd_base;
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
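+ /* RX and TX descriptors share one allocation; the TX base sits
+ * RX_RING_SIZE entries in, whichever descriptor size is in use.
+ */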
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ if (fep->bufdesc_ex)
+ fep->tx_bd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ else
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fec_restart(ndev, 0);
fep->pdev = pdev;
fep->dev_id = dev_id++;
+ fep->bufdesc_ex = 0;
+
if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
goto failed_clk;
}
-#ifdef CONFIG_FEC_PTP
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ fep->bufdesc_ex =
+ pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
ret = PTR_ERR(fep->clk_ptp);
- goto failed_clk;
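+ /* No PTP clock: fall back to legacy descriptors rather than
+ * failing the probe.
+ */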
+ fep->bufdesc_ex = 0;
}
-#endif
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_prepare_enable(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_prepare_enable(fep->clk_ptp);
+
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
if (ret)
goto failed_register;
-#ifdef CONFIG_FEC_PTP
- fec_ptp_init(ndev, pdev);
-#endif
+ if (fep->bufdesc_ex)
+ fec_ptp_init(ndev, pdev);
return 0;
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_disable_unprepare(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
if (irq > 0)
free_irq(irq, ndev);
}
-#ifdef CONFIG_FEC_PTP
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
-#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);