/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}

static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

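/* These helpers rely on ring->cur and ring->dirty being free-running
 * unsigned counters, so the subtractions stay correct across wraparound
 * by unsigned modular arithmetic: e.g. cur = 2 after a wrap and
 * dirty = UINT_MAX - 1 still yields (cur - dirty) == 4 outstanding
 * descriptors.
 */
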
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

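/* Worked example of the sizing above, assuming XGBE_RX_BUF_ALIGN is 64:
 * a 1500-byte MTU gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 4 (VLAN_HLEN) = 1522 bytes, which the mask arithmetic rounds up to a
 * 1536-byte buffer.
 */
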
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_enable_rx_tx_int(pdata, channel);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_disable_rx_tx_int(pdata, channel);
}

static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}

static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	return IRQ_HANDLED;
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(irq, pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr(irq, pdata);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(irq, pdata);

	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}

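/* DMA_CH_SR is write-one-to-clear, so building the TI/RI mask locally and
 * writing only those bits back acknowledges the Tx/Rx causes while leaving
 * any other pending status bits untouched.
 */
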
static void xgbe_tx_timer(unsigned long data)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)data;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	setup_timer(&pdata->service_timer, xgbe_service_timer,
		    (unsigned long)pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	DBGPR("<--xgbe_get_all_hw_features\n");
}

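/* Example of the fifo translation above: a raw RXFIFOSIZE field value of 9
 * decodes to 1 << (9 + 7) = 65536 bytes (64KB) of Rx fifo.
 */
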
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_start\n");

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

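/* Note on the switch above: each PTP v2 UDP filter case intentionally falls
 * through into its PTP v1 counterpart so the shared IPv4/IPv6 and timestamp
 * enable bits are written only once. Userspace reaches this path through the
 * standard SIOCSHWTSTAMP ioctl with a struct hwtstamp_config.
 */
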
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		return ret;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}

static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static void xgbe_tx_timeout(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc_to_netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 tc;

	if (tc_to_netdev->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc = tc_to_netdev->tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

= {
1911 .ndo_open
= xgbe_open
,
1912 .ndo_stop
= xgbe_close
,
1913 .ndo_start_xmit
= xgbe_xmit
,
1914 .ndo_set_rx_mode
= xgbe_set_rx_mode
,
1915 .ndo_set_mac_address
= xgbe_set_mac_address
,
1916 .ndo_validate_addr
= eth_validate_addr
,
1917 .ndo_do_ioctl
= xgbe_ioctl
,
1918 .ndo_change_mtu
= xgbe_change_mtu
,
1919 .ndo_tx_timeout
= xgbe_tx_timeout
,
1920 .ndo_get_stats64
= xgbe_get_stats64
,
1921 .ndo_vlan_rx_add_vid
= xgbe_vlan_rx_add_vid
,
1922 .ndo_vlan_rx_kill_vid
= xgbe_vlan_rx_kill_vid
,
1923 #ifdef CONFIG_NET_POLL_CONTROLLER
1924 .ndo_poll_controller
= xgbe_poll_controller
,
1926 .ndo_setup_tc
= xgbe_setup_tc
,
1927 .ndo_set_features
= xgbe_set_features
,
1930 const struct net_device_ops
*xgbe_get_netdev_ops(void)
1932 return &xgbe_netdev_ops
;
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}

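/* Writing DMA_CH_RDTR_LO hands the refreshed descriptors back to the
 * hardware; the wmb() above ensures the descriptor writes are visible
 * before this doorbell.
 */
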
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;
	unsigned int copy_len;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      rdata->rx.buf.dma_base,
					      rdata->rx.buf.dma_off,
					      rdata->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				rdata->rx.buf.pa.pages,
				rdata->rx.buf.pa.pages_offset,
				len, rdata->rx.buf.dma_len);
		rdata->rx.buf.pa.pages = NULL;
	}

	return skb;
}

static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}

static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, rdesc_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			rdesc_len = rdata->rx.len - len;
			len += rdesc_len;

			if (rdesc_len && !skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      rdesc_len);
				if (!skb)
					error = 1;
			} else if (rdesc_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						rdesc_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
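
/* NAPI poll handler used when each channel has a dedicated interrupt:
 * service the single channel, and if the budget was not exhausted,
 * complete NAPI and re-enable that channel's interrupt.
 */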
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
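
/* NAPI poll handler used when the channels share one interrupt.  The
 * budget is divided across the Rx rings and the channels are swept
 * repeatedly until either the budget is consumed or a full pass makes
 * no progress.
 */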
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
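
/* Debug helper: dump "count" Tx descriptors starting at "idx", noting
 * whether each was just queued for transmit or already processed by
 * the device.
 */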
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
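
/* Debug helper: dump a single Rx descriptor after the device has
 * written it back.
 */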
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
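
/* Debug helper: log a packet's Ethernet header fields followed by a hex
 * dump of its contents, grouped four bytes at a time, 32 bytes per
 * line.  A line of output looks roughly like (illustrative example
 * only):
 *
 *   0x0000: ffffffff ffff0242 ac110002 08060001  00080006 ...
 */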
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}