/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)		L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

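/* Default ring occupancy thresholds: a quarter of the descriptor ring */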
#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_disable(&rx_q->napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_enable(&rx_q->napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 * If a specific clk_csr value is passed from the platform
 * this means that the CSR Clock Range selection cannot be
 * changed at run-time and it is fixed (as reported in the driver
 * documentation). Otherwise the driver will try to set the MDC
 * clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* The platform-provided default clk_csr is assumed valid in all
	 * cases except the ones handled below. For rates higher than the
	 * IEEE 802.3 specified frequency we cannot estimate the proper
	 * divider, as the frequency of clk_csr_i is not known, so the
	 * default divider is left unchanged.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

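	/* The sun8i glue layer uses its own MDC clock divider encoding,
	 * selected directly from the bus clock rate.
	 */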
	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

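	/* One descriptor is kept unused so that a full ring can be told
	 * apart from an empty one (cur_tx == dirty_tx only when the ring
	 * is empty).
	 */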
	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all the TX queues are idle and
 * then enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits LPI mode and disables EEE when the LPI
 * state is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	unsigned long flags;
	bool ret = false;

	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* When using the PCS we cannot deal with the PHY registers at this
	 * stage, so extra features like EEE are not supported.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* Manage, at run-time, the case where EEE cannot be
			 * supported anymore (for example because the link
			 * partner caps have changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4)
		desc = np;

	/* Check if timestamp is available */
	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
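		/* With the advanced timestamp unit, map the requested filter
		 * onto the PTP Timestamp Control Register (TCR) snapshot and
		 * enable bits assembled further below.
		 */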
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
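		/* Illustrative example (values are hypothetical): with
		 * clk_ptp_rate = 50 MHz and sec_inc = 40 ns, freq_div_ratio
		 * is 25 MHz and the computed addend is 2^32 / 2 = 0x80000000.
		 */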
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link may come up on
 * different networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	bool new_state = false;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the adjust_link hook when a
		 * switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, so make sure we force a link transition if
	 * we have an UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

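/* Map the MTU onto one of the fixed DMA buffer sizes (2 KiB, 4 KiB, 8 KiB or
 * the 1536-byte default).
 */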
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and initialize the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

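	/* DWMAC cores from 4.00 onwards take the buffer address in DES0;
	 * older cores use DES2.
	 */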
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	unsigned int bfsize = 0;
	int ret = -ENOMEM;
	int queue;
	int i;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(rx_q->dma_erx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 1);
			else
				priv->hw->mode->init(rx_q->dma_rx,
						     rx_q->dma_rx_phy,
						     DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				priv->hw->mode->init(tx_q->dma_etx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 1);
			else
				priv->hw->mode->init(tx_q->dma_tx,
						     tx_q->dma_tx_phy,
						     DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			if (priv->synopsys_id >= DWMAC_CORE_4_00) {
				p->des0 = 0;
				p->des1 = 0;
				p->des2 = 0;
				p->des3 = 0;
			} else {
				p->des2 = 0;
			}

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor type is in use (extended or
 * basic), this function allocates the resources for the RX path. For
 * reception it pre-allocates the RX socket buffers in order to allow the
 * zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
							    DMA_RX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &rx_q->dma_rx_phy,
							    GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
							   DMA_RX_SIZE *
							   sizeof(struct
							   dma_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor type is in use (extended or
 * basic), this function allocates the resources for the TX path (descriptor
 * rings and the per-descriptor bookkeeping arrays).
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
							    DMA_TX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &tx_q->dma_tx_phy,
							    GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
							   DMA_TX_SIZE *
							   sizeof(struct
							   dma_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor type is in use (extended or
 * basic), this function allocates the resources for the TX and RX paths. In
 * case of reception, for example, it pre-allocates the RX socket buffers in
 * order to allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	priv->hw->dma->start_rx(priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	priv->hw->dma->start_tx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_rx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_tx(priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 * that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		for (chan = 0; chan < rx_channels_count; chan++)
			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
						   rxfifosz);

		for (chan = 0; chan < tx_channels_count; chan++)
			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
					rxfifosz);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while (entry != tx_q->cur_tx) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						   &priv->xstats, p,
						   priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

1922 stmmac_stop_tx_dma(priv, chan);
1923 dma_free_tx_skbufs(priv, chan);
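/* Re-initialize every descriptor in the TX ring before restarting the DMA */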
1924 for (i = 0; i < DMA_TX_SIZE; i++)
1925 if (priv->extend_desc)
1926 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1927 priv->mode,
1928 (i == DMA_TX_SIZE - 1));
1929 else
1930 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1931 priv->mode,
1932 (i == DMA_TX_SIZE - 1));
1933 tx_q->dirty_tx = 0;
1934 tx_q->cur_tx = 0;
1935 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1936 stmmac_start_tx_dma(priv, chan);
1937
1938 priv->dev->stats.tx_errors++;
1939 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1940 }
1941
1942 /**
1943 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1944 * @priv: driver private structure
1945 * @txmode: TX operating mode
1946 * @rxmode: RX operating mode
1947 * @chan: channel index
1948 * Description: it is used for configuring the DMA operation mode at
1949 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1950 * mode.
1951 */
1952 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1953 u32 rxmode, u32 chan)
1954 {
1955 int rxfifosz = priv->plat->rx_fifo_size;
1956
1957 if (rxfifosz == 0)
1958 rxfifosz = priv->dma_cap.rx_fifo_size;
1959
1960 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1961 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1962 rxfifosz);
1963 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1964 } else {
1965 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1966 rxfifosz);
1967 }
1968 }
1969
1970 /**
1971 * stmmac_dma_interrupt - DMA ISR
1972 * @priv: driver private structure
1973 * Description: this is the DMA ISR. It is called by the main ISR.
1974 * It calls the dwmac dma routine and schedules the poll method in case
1975 * some work can be done.
1976 */
1977 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1978 {
1979 u32 tx_channel_count = priv->plat->tx_queues_to_use;
1980 int status;
1981 u32 chan;
1982
1983 for (chan = 0; chan < tx_channel_count; chan++) {
1984 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1985
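/* RX and TX interrupts for this channel are both serviced by the NAPI instance of the matching RX queue */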
1986 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1987 &priv->xstats, chan);
1988 if (likely((status & handle_rx)) || (status & handle_tx)) {
1989 if (likely(napi_schedule_prep(&rx_q->napi))) {
1990 stmmac_disable_dma_irq(priv, chan);
1991 __napi_schedule(&rx_q->napi);
1992 }
1993 }
1994
1995 if (unlikely(status & tx_hard_error_bump_tc)) {
1996 /* Try to bump up the dma threshold on this failure */
1997 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1998 (tc <= 256)) {
1999 tc += 64;
2000 if (priv->plat->force_thresh_dma_mode)
2001 stmmac_set_dma_operation_mode(priv,
2002 tc,
2003 tc,
2004 chan);
2005 else
2006 stmmac_set_dma_operation_mode(priv,
2007 tc,
2008 SF_DMA_MODE,
2009 chan);
2010 priv->xstats.threshold = tc;
2011 }
2012 } else if (unlikely(status == tx_hard_error)) {
2013 stmmac_tx_err(priv, chan);
2014 }
2015 }
2016 }
2017
2018 /**
2019 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2020 * @priv: driver private structure
2021 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2022 */
2023 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2024 {
2025 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2026 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2027
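/* The PTP and MMC register blocks sit at different offsets on GMAC4 and on 3.x cores */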
2028 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2029 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2030 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2031 } else {
2032 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2033 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2034 }
2035
2036 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2037
2038 if (priv->dma_cap.rmon) {
2039 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2040 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2041 } else
2042 netdev_info(priv->dev, "No MAC Management Counters available\n");
2043 }
2044
2045 /**
2046 * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
2047 * @priv: driver private structure
2048 * Description: select the Enhanced/Alternate or Normal descriptors.
2049 * In case of Enhanced/Alternate, it checks if the extended descriptors are
2050 * supported by the HW capability register.
2051 */
2052 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2053 {
2054 if (priv->plat->enh_desc) {
2055 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2056
2057 /* GMAC older than 3.50 has no extended descriptors */
2058 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2059 dev_info(priv->device, "Enabled extended descriptors\n");
2060 priv->extend_desc = 1;
2061 } else
2062 dev_warn(priv->device, "Extended descriptors not supported\n");
2063
2064 priv->hw->desc = &enh_desc_ops;
2065 } else {
2066 dev_info(priv->device, "Normal descriptors\n");
2067 priv->hw->desc = &ndesc_ops;
2068 }
2069 }
2070
2071 /**
2072 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2073 * @priv: driver private structure
2074 * Description:
2075 * newer GMAC chip generations have a dedicated register to indicate the
2076 * presence of optional features/functions.
2077 * This can also be used to override the values passed through the
2078 * platform, which is necessary for old MAC10/100 and GMAC chips.
2079 */
2080 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2081 {
2082 u32 ret = 0;
2083
2084 if (priv->hw->dma->get_hw_feature) {
2085 priv->hw->dma->get_hw_feature(priv->ioaddr,
2086 &priv->dma_cap);
2087 ret = 1;
2088 }
2089
2090 return ret;
2091 }
2092
2093 /**
2094 * stmmac_check_ether_addr - check if the MAC addr is valid
2095 * @priv: driver private structure
2096 * Description:
2097 * it verifies whether the MAC address is valid; in case it is not, it
2098 * generates a random MAC address
2099 */
2100 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2101 {
2102 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2103 priv->hw->mac->get_umac_addr(priv->hw,
2104 priv->dev->dev_addr, 0);
2105 if (!is_valid_ether_addr(priv->dev->dev_addr))
2106 eth_hw_addr_random(priv->dev);
2107 netdev_info(priv->dev, "device MAC address %pM\n",
2108 priv->dev->dev_addr);
2109 }
2110 }
2111
2112 /**
2113 * stmmac_init_dma_engine - DMA init.
2114 * @priv: driver private structure
2115 * Description:
2116 * It inits the DMA by invoking the specific MAC/GMAC callback.
2117 * Some DMA parameters can be passed from the platform;
2118 * if they are not passed, a default is kept for the MAC or GMAC.
2119 */
2120 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2121 {
2122 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2123 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2124 struct stmmac_rx_queue *rx_q;
2125 struct stmmac_tx_queue *tx_q;
2126 u32 dummy_dma_rx_phy = 0;
2127 u32 dummy_dma_tx_phy = 0;
2128 u32 chan = 0;
2129 int atds = 0;
2130 int ret = 0;
2131
2132 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2133 dev_err(priv->device, "Invalid DMA configuration\n");
2134 return -EINVAL;
2135 }
2136
2137 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2138 atds = 1;
2139
2140 ret = priv->hw->dma->reset(priv->ioaddr);
2141 if (ret) {
2142 dev_err(priv->device, "Failed to reset the dma\n");
2143 return ret;
2144 }
2145
2146 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2147 /* DMA Configuration */
2148 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2149 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2150
2151 /* DMA RX Channel Configuration */
2152 for (chan = 0; chan < rx_channels_count; chan++) {
2153 rx_q = &priv->rx_queue[chan];
2154
2155 priv->hw->dma->init_rx_chan(priv->ioaddr,
2156 priv->plat->dma_cfg,
2157 rx_q->dma_rx_phy, chan);
2158
2159 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2160 (DMA_RX_SIZE * sizeof(struct dma_desc));
2161 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2162 rx_q->rx_tail_addr,
2163 chan);
2164 }
2165
2166 /* DMA TX Channel Configuration */
2167 for (chan = 0; chan < tx_channels_count; chan++) {
2168 tx_q = &priv->tx_queue[chan];
2169
2170 priv->hw->dma->init_chan(priv->ioaddr,
2171 priv->plat->dma_cfg,
2172 chan);
2173
2174 priv->hw->dma->init_tx_chan(priv->ioaddr,
2175 priv->plat->dma_cfg,
2176 tx_q->dma_tx_phy, chan);
2177
2178 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2179 (DMA_TX_SIZE * sizeof(struct dma_desc));
2180 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2181 tx_q->tx_tail_addr,
2182 chan);
2183 }
2184 } else {
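/* Cores older than 4.00 have a single DMA channel: program it with the descriptor base addresses of queue 0 */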
2185 rx_q = &priv->rx_queue[chan];
2186 tx_q = &priv->tx_queue[chan];
2187 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2188 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2189 }
2190
2191 if (priv->plat->axi && priv->hw->dma->axi)
2192 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2193
2194 return ret;
2195 }
2196
2197 /**
2198 * stmmac_tx_timer - mitigation sw timer for tx.
2199 * @data: data pointer
2200 * Description:
2201 * This is the timer handler to directly invoke the stmmac_tx_clean.
2202 */
2203 static void stmmac_tx_timer(unsigned long data)
2204 {
2205 struct stmmac_priv *priv = (struct stmmac_priv *)data;
2206 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2207 u32 queue;
2208
2209 /* let's scan all the tx queues */
2210 for (queue = 0; queue < tx_queues_count; queue++)
2211 stmmac_tx_clean(priv, queue);
2212 }
2213
2214 /**
2215 * stmmac_init_tx_coalesce - init tx mitigation options.
2216 * @priv: driver private structure
2217 * Description:
2218 * This inits the transmit coalesce parameters: i.e. timer rate,
2219 * timer handler and default threshold used for enabling the
2220 * interrupt on completion bit.
2221 */
2222 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2223 {
2224 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2225 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2226 init_timer(&priv->txtimer);
2227 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2228 priv->txtimer.data = (unsigned long)priv;
2229 priv->txtimer.function = stmmac_tx_timer;
2230 add_timer(&priv->txtimer);
2231 }
2232
2233 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2234 {
2235 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2236 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2237 u32 chan;
2238
2239 /* set TX ring length */
2240 if (priv->hw->dma->set_tx_ring_len) {
2241 for (chan = 0; chan < tx_channels_count; chan++)
2242 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2243 (DMA_TX_SIZE - 1), chan);
2244 }
2245
2246 /* set RX ring length */
2247 if (priv->hw->dma->set_rx_ring_len) {
2248 for (chan = 0; chan < rx_channels_count; chan++)
2249 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2250 (DMA_RX_SIZE - 1), chan);
2251 }
2252 }
2253
2254 /**
2255 * stmmac_set_tx_queue_weight - Set TX queue weight
2256 * @priv: driver private structure
2257 * Description: It is used for setting the TX queue weights
2258 */
2259 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2260 {
2261 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2262 u32 weight;
2263 u32 queue;
2264
2265 for (queue = 0; queue < tx_queues_count; queue++) {
2266 weight = priv->plat->tx_queues_cfg[queue].weight;
2267 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2268 }
2269 }
2270
2271 /**
2272 * stmmac_configure_cbs - Configure CBS in TX queue
2273 * @priv: driver private structure
2274 * Description: It is used for configuring CBS in AVB TX queues
2275 */
2276 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2277 {
2278 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2279 u32 mode_to_use;
2280 u32 queue;
2281
2282 /* queue 0 is reserved for legacy traffic */
2283 for (queue = 1; queue < tx_queues_count; queue++) {
2284 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2285 if (mode_to_use == MTL_QUEUE_DCB)
2286 continue;
2287
2288 priv->hw->mac->config_cbs(priv->hw,
2289 priv->plat->tx_queues_cfg[queue].send_slope,
2290 priv->plat->tx_queues_cfg[queue].idle_slope,
2291 priv->plat->tx_queues_cfg[queue].high_credit,
2292 priv->plat->tx_queues_cfg[queue].low_credit,
2293 queue);
2294 }
2295 }
2296
2297 /**
2298 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2299 * @priv: driver private structure
2300 * Description: It is used for mapping RX queues to RX dma channels
2301 */
2302 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2303 {
2304 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2305 u32 queue;
2306 u32 chan;
2307
2308 for (queue = 0; queue < rx_queues_count; queue++) {
2309 chan = priv->plat->rx_queues_cfg[queue].chan;
2310 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2311 }
2312 }
2313
2314 /**
2315 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2316 * @priv: driver private structure
2317 * Description: It is used for configuring the RX Queue Priority
2318 */
2319 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2320 {
2321 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322 u32 queue;
2323 u32 prio;
2324
2325 for (queue = 0; queue < rx_queues_count; queue++) {
2326 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2327 continue;
2328
2329 prio = priv->plat->rx_queues_cfg[queue].prio;
2330 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2331 }
2332 }
2333
2334 /**
2335 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2336 * @priv: driver private structure
2337 * Description: It is used for configuring the TX Queue Priority
2338 */
2339 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2340 {
2341 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2342 u32 queue;
2343 u32 prio;
2344
2345 for (queue = 0; queue < tx_queues_count; queue++) {
2346 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2347 continue;
2348
2349 prio = priv->plat->tx_queues_cfg[queue].prio;
2350 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2351 }
2352 }
2353
2354 /**
2355 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2356 * @priv: driver private structure
2357 * Description: It is used for configuring the RX queue routing
2358 */
2359 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2360 {
2361 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2362 u32 queue;
2363 u8 packet;
2364
2365 for (queue = 0; queue < rx_queues_count; queue++) {
2366 /* no specific packet type routing specified for the queue */
2367 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2368 continue;
2369
2370 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2371 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2372 }
2373 }
2374
2375 /**
2376 * stmmac_mtl_configuration - Configure MTL
2377 * @priv: driver private structure
2378 * Description: It is used for configuring MTL
2379 */
2380 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2381 {
2382 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2383 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2384
2385 if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2386 stmmac_set_tx_queue_weight(priv);
2387
2388 /* Configure MTL RX algorithms */
2389 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2390 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2391 priv->plat->rx_sched_algorithm);
2392
2393 /* Configure MTL TX algorithms */
2394 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2395 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2396 priv->plat->tx_sched_algorithm);
2397
2398 /* Configure CBS in AVB TX queues */
2399 if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2400 stmmac_configure_cbs(priv);
2401
2402 /* Map RX MTL to DMA channels */
2403 if (priv->hw->mac->map_mtl_to_dma)
2404 stmmac_rx_queue_dma_chan_map(priv);
2405
2406 /* Enable MAC RX Queues */
2407 if (priv->hw->mac->rx_queue_enable)
2408 stmmac_mac_enable_rx_queues(priv);
2409
2410 /* Set RX priorities */
2411 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2412 stmmac_mac_config_rx_queues_prio(priv);
2413
2414 /* Set TX priorities */
2415 if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2416 stmmac_mac_config_tx_queues_prio(priv);
2417
2418 /* Set RX routing */
2419 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2420 stmmac_mac_config_rx_queues_routing(priv);
2421 }
2422
2423 /**
2424 * stmmac_hw_setup - setup mac in a usable state.
2425 * @dev : pointer to the device structure.
2426 * Description:
2427 * this is the main function to setup the HW in a usable state: the DMA
2428 * engine is reset, the core registers are configured (e.g. AXI,
2429 * Checksum features, timers) and the DMA is made ready to start receiving
2430 * and transmitting.
2431 * Return value:
2432 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2433 * file on failure.
2434 */
2435 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2436 {
2437 struct stmmac_priv *priv = netdev_priv(dev);
2438 u32 rx_cnt = priv->plat->rx_queues_to_use;
2439 u32 tx_cnt = priv->plat->tx_queues_to_use;
2440 u32 chan;
2441 int ret;
2442
2443 /* DMA initialization and SW reset */
2444 ret = stmmac_init_dma_engine(priv);
2445 if (ret < 0) {
2446 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2447 __func__);
2448 return ret;
2449 }
2450
2451 /* Copy the MAC addr into the HW */
2452 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2453
2454 /* PS and related bits will be programmed according to the speed */
2455 if (priv->hw->pcs) {
2456 int speed = priv->plat->mac_port_sel_speed;
2457
2458 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2459 (speed == SPEED_1000)) {
2460 priv->hw->ps = speed;
2461 } else {
2462 dev_warn(priv->device, "invalid port speed\n");
2463 priv->hw->ps = 0;
2464 }
2465 }
2466
2467 /* Initialize the MAC Core */
2468 priv->hw->mac->core_init(priv->hw, dev->mtu);
2469
2470 /* Initialize MTL */
2471 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2472 stmmac_mtl_configuration(priv);
2473
2474 ret = priv->hw->mac->rx_ipc(priv->hw);
2475 if (!ret) {
2476 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2477 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2478 priv->hw->rx_csum = 0;
2479 }
2480
2481 /* Enable the MAC Rx/Tx */
2482 priv->hw->mac->set_mac(priv->ioaddr, true);
2483
2484 /* Set the HW DMA mode and the COE */
2485 stmmac_dma_operation_mode(priv);
2486
2487 stmmac_mmc_setup(priv);
2488
2489 if (init_ptp) {
2490 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2491 if (ret < 0)
2492 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2493
2494 ret = stmmac_init_ptp(priv);
2495 if (ret == -EOPNOTSUPP)
2496 netdev_warn(priv->dev, "PTP not supported by HW\n");
2497 else if (ret)
2498 netdev_warn(priv->dev, "PTP init failed\n");
2499 }
2500
2501 #ifdef CONFIG_DEBUG_FS
2502 ret = stmmac_init_fs(dev);
2503 if (ret < 0)
2504 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2505 __func__);
2506 #endif
2507 /* Start the ball rolling... */
2508 stmmac_start_all_dma(priv);
2509
2510 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2511
2512 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2513 priv->rx_riwt = MAX_DMA_RIWT;
2514 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2515 }
2516
2517 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2518 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2519
2520 /* set TX and RX rings length */
2521 stmmac_set_rings_length(priv);
2522
2523 /* Enable TSO */
2524 if (priv->tso) {
2525 for (chan = 0; chan < tx_cnt; chan++)
2526 priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2527 }
2528
2529 return 0;
2530 }
2531
2532 static void stmmac_hw_teardown(struct net_device *dev)
2533 {
2534 struct stmmac_priv *priv = netdev_priv(dev);
2535
2536 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2537 }
2538
2539 /**
2540 * stmmac_open - open entry point of the driver
2541 * @dev : pointer to the device structure.
2542 * Description:
2543 * This function is the open entry point of the driver.
2544 * Return value:
2545 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2546 * file on failure.
2547 */
2548 static int stmmac_open(struct net_device *dev)
2549 {
2550 struct stmmac_priv *priv = netdev_priv(dev);
2551 int ret;
2552
2553 stmmac_check_ether_addr(priv);
2554
2555 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2556 priv->hw->pcs != STMMAC_PCS_TBI &&
2557 priv->hw->pcs != STMMAC_PCS_RTBI) {
2558 ret = stmmac_init_phy(dev);
2559 if (ret) {
2560 netdev_err(priv->dev,
2561 "%s: Cannot attach to PHY (error: %d)\n",
2562 __func__, ret);
2563 return ret;
2564 }
2565 }
2566
2567 /* Extra statistics */
2568 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2569 priv->xstats.threshold = tc;
2570
2571 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2572 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2573 priv->mss = 0;
2574
2575 ret = alloc_dma_desc_resources(priv);
2576 if (ret < 0) {
2577 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2578 __func__);
2579 goto dma_desc_error;
2580 }
2581
2582 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2583 if (ret < 0) {
2584 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2585 __func__);
2586 goto init_error;
2587 }
2588
2589 ret = stmmac_hw_setup(dev, true);
2590 if (ret < 0) {
2591 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2592 goto init_error;
2593 }
2594
2595 stmmac_init_tx_coalesce(priv);
2596
2597 if (dev->phydev)
2598 phy_start(dev->phydev);
2599
2600 /* Request the IRQ lines */
2601 ret = request_irq(dev->irq, stmmac_interrupt,
2602 IRQF_SHARED, dev->name, dev);
2603 if (unlikely(ret < 0)) {
2604 netdev_err(priv->dev,
2605 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2606 __func__, dev->irq, ret);
2607 goto irq_error;
2608 }
2609
2610 /* Request the Wake IRQ in case another line is used for WoL */
2611 if (priv->wol_irq != dev->irq) {
2612 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2613 IRQF_SHARED, dev->name, dev);
2614 if (unlikely(ret < 0)) {
2615 netdev_err(priv->dev,
2616 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2617 __func__, priv->wol_irq, ret);
2618 goto wolirq_error;
2619 }
2620 }
2621
2622 /* Request the LPI IRQ in case a dedicated line is used for it */
2623 if (priv->lpi_irq > 0) {
2624 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2625 dev->name, dev);
2626 if (unlikely(ret < 0)) {
2627 netdev_err(priv->dev,
2628 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2629 __func__, priv->lpi_irq, ret);
2630 goto lpiirq_error;
2631 }
2632 }
2633
2634 stmmac_enable_all_queues(priv);
2635 stmmac_start_all_queues(priv);
2636
2637 return 0;
2638
2639 lpiirq_error:
2640 if (priv->wol_irq != dev->irq)
2641 free_irq(priv->wol_irq, dev);
2642 wolirq_error:
2643 free_irq(dev->irq, dev);
2644 irq_error:
2645 if (dev->phydev)
2646 phy_stop(dev->phydev);
2647
2648 del_timer_sync(&priv->txtimer);
2649 stmmac_hw_teardown(dev);
2650 init_error:
2651 free_dma_desc_resources(priv);
2652 dma_desc_error:
2653 if (dev->phydev)
2654 phy_disconnect(dev->phydev);
2655
2656 return ret;
2657 }
2658
2659 /**
2660 * stmmac_release - close entry point of the driver
2661 * @dev : device pointer.
2662 * Description:
2663 * This is the stop entry point of the driver.
2664 */
2665 static int stmmac_release(struct net_device *dev)
2666 {
2667 struct stmmac_priv *priv = netdev_priv(dev);
2668
2669 if (priv->eee_enabled)
2670 del_timer_sync(&priv->eee_ctrl_timer);
2671
2672 /* Stop and disconnect the PHY */
2673 if (dev->phydev) {
2674 phy_stop(dev->phydev);
2675 phy_disconnect(dev->phydev);
2676 }
2677
2678 stmmac_stop_all_queues(priv);
2679
2680 stmmac_disable_all_queues(priv);
2681
2682 del_timer_sync(&priv->txtimer);
2683
2684 /* Free the IRQ lines */
2685 free_irq(dev->irq, dev);
2686 if (priv->wol_irq != dev->irq)
2687 free_irq(priv->wol_irq, dev);
2688 if (priv->lpi_irq > 0)
2689 free_irq(priv->lpi_irq, dev);
2690
2691 /* Stop TX/RX DMA and clear the descriptors */
2692 stmmac_stop_all_dma(priv);
2693
2694 /* Release and free the Rx/Tx resources */
2695 free_dma_desc_resources(priv);
2696
2697 /* Disable the MAC Rx/Tx */
2698 priv->hw->mac->set_mac(priv->ioaddr, false);
2699
2700 netif_carrier_off(dev);
2701
2702 #ifdef CONFIG_DEBUG_FS
2703 stmmac_exit_fs(dev);
2704 #endif
2705
2706 stmmac_release_ptp(priv);
2707
2708 return 0;
2709 }
2710
2711 /**
2712 * stmmac_tso_allocator - fill TX descriptors with the TSO payload
2713 * @priv: driver private structure
2714 * @des: buffer start address
2715 * @total_len: total length to fill in descriptors
2716 * @last_segment: condition for the last descriptor
2717 * @queue: TX queue index
2718 * Description:
2719 * This function fills a descriptor and requests new descriptors according to
2720 * the buffer length to fill
2721 */
2722 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2723 int total_len, bool last_segment, u32 queue)
2724 {
2725 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2726 struct dma_desc *desc;
2727 u32 buff_size;
2728 int tmp_len;
2729
2730 tmp_len = total_len;
2731
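/* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor per chunk */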
2732 while (tmp_len > 0) {
2733 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2734 desc = tx_q->dma_tx + tx_q->cur_tx;
2735
2736 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2737 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2738 TSO_MAX_BUFF_SIZE : tmp_len;
2739
2740 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2741 0, 1,
2742 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2743 0, 0);
2744
2745 tmp_len -= TSO_MAX_BUFF_SIZE;
2746 }
2747 }
2748
2749 /**
2750 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2751 * @skb : the socket buffer
2752 * @dev : device pointer
2753 * Description: this is the transmit function that is called on TSO frames
2754 * (support available on GMAC4 and newer chips).
2755 * The diagram below shows the ring programming in case of TSO frames:
2756 *
2757 * First Descriptor
2758 * --------
2759 * | DES0 |---> buffer1 = L2/L3/L4 header
2760 * | DES1 |---> TCP Payload (can continue on next descr...)
2761 * | DES2 |---> buffer 1 and 2 len
2762 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2763 * --------
2764 * |
2765 * ...
2766 * |
2767 * --------
2768 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2769 * | DES1 | --|
2770 * | DES2 | --> buffer 1 and 2 len
2771 * | DES3 |
2772 * --------
2773 *
2774 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
2775 */
2776 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2777 {
2778 struct dma_desc *desc, *first, *mss_desc = NULL;
2779 struct stmmac_priv *priv = netdev_priv(dev);
2780 int nfrags = skb_shinfo(skb)->nr_frags;
2781 u32 queue = skb_get_queue_mapping(skb);
2782 unsigned int first_entry, des;
2783 struct stmmac_tx_queue *tx_q;
2784 int tmp_pay_len = 0;
2785 u32 pay_len, mss;
2786 u8 proto_hdr_len;
2787 int i;
2788
2789 tx_q = &priv->tx_queue[queue];
2790
2791 /* Compute header lengths */
2792 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2793
2794 /* Desc availability based on threshold should be safe enough */
2795 if (unlikely(stmmac_tx_avail(priv, queue) <
2796 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2797 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2798 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2799 queue));
2800 /* This is a hard error, log it. */
2801 netdev_err(priv->dev,
2802 "%s: Tx Ring full when queue awake\n",
2803 __func__);
2804 }
2805 return NETDEV_TX_BUSY;
2806 }
2807
2808 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2809
2810 mss = skb_shinfo(skb)->gso_size;
2811
2812 /* set new MSS value if needed */
2813 if (mss != priv->mss) {
2814 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2815 priv->hw->desc->set_mss(mss_desc, mss);
2816 priv->mss = mss;
2817 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2818 }
2819
2820 if (netif_msg_tx_queued(priv)) {
2821 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2822 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2823 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2824 skb->data_len);
2825 }
2826
2827 first_entry = tx_q->cur_tx;
2828
2829 desc = tx_q->dma_tx + first_entry;
2830 first = desc;
2831
2832 /* first descriptor: fill Headers on Buf1 */
2833 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2834 DMA_TO_DEVICE);
2835 if (dma_mapping_error(priv->device, des))
2836 goto dma_map_err;
2837
2838 tx_q->tx_skbuff_dma[first_entry].buf = des;
2839 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2840
2841 first->des0 = cpu_to_le32(des);
2842
2843 /* Fill start of payload in buff2 of first descriptor */
2844 if (pay_len)
2845 first->des1 = cpu_to_le32(des + proto_hdr_len);
2846
2847 /* If needed take extra descriptors to fill the remaining payload */
2848 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2849
2850 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2851
2852 /* Prepare fragments */
2853 for (i = 0; i < nfrags; i++) {
2854 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2855
2856 des = skb_frag_dma_map(priv->device, frag, 0,
2857 skb_frag_size(frag),
2858 DMA_TO_DEVICE);
2859 if (dma_mapping_error(priv->device, des))
2860 goto dma_map_err;
2861
2862 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2863 (i == nfrags - 1), queue);
2864
2865 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2866 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2867 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2868 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2869 }
2870
2871 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2872
2873 /* Only the last descriptor gets to point to the skb. */
2874 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2875
2876 /* We've used all descriptors we need for this skb, however,
2877 * advance cur_tx so that it references a fresh descriptor.
2878 * ndo_start_xmit will fill this descriptor the next time it's
2879 * called and stmmac_tx_clean may clean up to this descriptor.
2880 */
2881 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2882
2883 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2884 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2885 __func__);
2886 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2887 }
2888
2889 dev->stats.tx_bytes += skb->len;
2890 priv->xstats.tx_tso_frames++;
2891 priv->xstats.tx_tso_nfrags += nfrags;
2892
2893 /* Manage tx mitigation */
2894 priv->tx_count_frames += nfrags + 1;
2895 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2896 mod_timer(&priv->txtimer,
2897 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2898 } else {
2899 priv->tx_count_frames = 0;
2900 priv->hw->desc->set_tx_ic(desc);
2901 priv->xstats.tx_set_ic_bit++;
2902 }
2903
2904 skb_tx_timestamp(skb);
2905
2906 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2907 priv->hwts_tx_en)) {
2908 /* declare that device is doing timestamping */
2909 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2910 priv->hw->desc->enable_tx_timestamp(first);
2911 }
2912
2913 /* Complete the first descriptor before granting the DMA */
2914 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2915 proto_hdr_len,
2916 pay_len,
2917 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2918 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2919
2920 /* If context desc is used to change MSS */
2921 if (mss_desc)
2922 priv->hw->desc->set_tx_owner(mss_desc);
2923
2924 /* The own bit must be the latest setting done when prepare the
2925 * descriptor and then barrier is needed to make sure that
2926 * all is coherent before granting the DMA engine.
2927 */
2928 dma_wmb();
2929
2930 if (netif_msg_pktdata(priv)) {
2931 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2932 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2933 tx_q->cur_tx, first, nfrags);
2934
2935 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2936 0);
2937
2938 pr_info(">>> frame to be transmitted: ");
2939 print_pkt(skb->data, skb_headlen(skb));
2940 }
2941
2942 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2943
2944 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2945 queue);
2946
2947 return NETDEV_TX_OK;
2948
2949 dma_map_err:
2950 dev_err(priv->device, "Tx dma map failed\n");
2951 dev_kfree_skb(skb);
2952 priv->dev->stats.tx_dropped++;
2953 return NETDEV_TX_OK;
2954 }
2955
2956 /**
2957 * stmmac_xmit - Tx entry point of the driver
2958 * @skb : the socket buffer
2959 * @dev : device pointer
2960 * Description : this is the tx entry point of the driver.
2961 * It programs the chain or the ring and supports oversized frames
2962 * and the SG feature.
2963 */
2964 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2965 {
2966 struct stmmac_priv *priv = netdev_priv(dev);
2967 unsigned int nopaged_len = skb_headlen(skb);
2968 int i, csum_insertion = 0, is_jumbo = 0;
2969 u32 queue = skb_get_queue_mapping(skb);
2970 int nfrags = skb_shinfo(skb)->nr_frags;
2971 int entry;
2972 unsigned int first_entry;
2973 struct dma_desc *desc, *first;
2974 struct stmmac_tx_queue *tx_q;
2975 unsigned int enh_desc;
2976 unsigned int des;
2977
2978 tx_q = &priv->tx_queue[queue];
2979
2980 /* Manage oversized TCP frames for GMAC4 device */
2981 if (skb_is_gso(skb) && priv->tso) {
2982 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2983 return stmmac_tso_xmit(skb, dev);
2984 }
2985
2986 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2987 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2988 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2989 queue));
2990 /* This is a hard error, log it. */
2991 netdev_err(priv->dev,
2992 "%s: Tx Ring full when queue awake\n",
2993 __func__);
2994 }
2995 return NETDEV_TX_BUSY;
2996 }
2997
2998 if (priv->tx_path_in_lpi_mode)
2999 stmmac_disable_eee_mode(priv);
3000
3001 entry = tx_q->cur_tx;
3002 first_entry = entry;
3003
3004 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3005
3006 if (likely(priv->extend_desc))
3007 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3008 else
3009 desc = tx_q->dma_tx + entry;
3010
3011 first = desc;
3012
3013 enh_desc = priv->plat->enh_desc;
3014 /* To program the descriptors according to the size of the frame */
3015 if (enh_desc)
3016 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3017
3018 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3019 DWMAC_CORE_4_00)) {
3020 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3021 if (unlikely(entry < 0))
3022 goto dma_map_err;
3023 }
3024
3025 for (i = 0; i < nfrags; i++) {
3026 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3027 int len = skb_frag_size(frag);
3028 bool last_segment = (i == (nfrags - 1));
3029
3030 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3031
3032 if (likely(priv->extend_desc))
3033 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3034 else
3035 desc = tx_q->dma_tx + entry;
3036
3037 des = skb_frag_dma_map(priv->device, frag, 0, len,
3038 DMA_TO_DEVICE);
3039 if (dma_mapping_error(priv->device, des))
3040 goto dma_map_err; /* should reuse desc w/o issues */
3041
3042 tx_q->tx_skbuff[entry] = NULL;
3043
3044 tx_q->tx_skbuff_dma[entry].buf = des;
3045 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3046 desc->des0 = cpu_to_le32(des);
3047 else
3048 desc->des2 = cpu_to_le32(des);
3049
3050 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3051 tx_q->tx_skbuff_dma[entry].len = len;
3052 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3053
3054 /* Prepare the descriptor and set the own bit too */
3055 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3056 priv->mode, 1, last_segment,
3057 skb->len);
3058 }
3059
3060 /* Only the last descriptor gets to point to the skb. */
3061 tx_q->tx_skbuff[entry] = skb;
3062
3063 /* We've used all descriptors we need for this skb, however,
3064 * advance cur_tx so that it references a fresh descriptor.
3065 * ndo_start_xmit will fill this descriptor the next time it's
3066 * called and stmmac_tx_clean may clean up to this descriptor.
3067 */
3068 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3069 tx_q->cur_tx = entry;
3070
3071 if (netif_msg_pktdata(priv)) {
3072 void *tx_head;
3073
3074 netdev_dbg(priv->dev,
3075 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3076 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3077 entry, first, nfrags);
3078
3079 if (priv->extend_desc)
3080 tx_head = (void *)tx_q->dma_etx;
3081 else
3082 tx_head = (void *)tx_q->dma_tx;
3083
3084 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3085
3086 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3087 print_pkt(skb->data, skb->len);
3088 }
3089
3090 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3091 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3092 __func__);
3093 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3094 }
3095
3096 dev->stats.tx_bytes += skb->len;
3097
3098 /* According to the coalesce parameter the IC bit for the latest
3099 * segment is reset and the timer re-started to clean the tx status.
3100 * This approach takes care about the fragments: desc is the first
3101 * element in case of no SG.
3102 */
3103 priv->tx_count_frames += nfrags + 1;
3104 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3105 mod_timer(&priv->txtimer,
3106 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3107 } else {
3108 priv->tx_count_frames = 0;
3109 priv->hw->desc->set_tx_ic(desc);
3110 priv->xstats.tx_set_ic_bit++;
3111 }
3112
3113 skb_tx_timestamp(skb);
3114
3115 /* Ready to fill the first descriptor and set the OWN bit w/o any
3116 * problems because all the descriptors are actually ready to be
3117 * passed to the DMA engine.
3118 */
3119 if (likely(!is_jumbo)) {
3120 bool last_segment = (nfrags == 0);
3121
3122 des = dma_map_single(priv->device, skb->data,
3123 nopaged_len, DMA_TO_DEVICE);
3124 if (dma_mapping_error(priv->device, des))
3125 goto dma_map_err;
3126
3127 tx_q->tx_skbuff_dma[first_entry].buf = des;
3128 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3129 first->des0 = cpu_to_le32(des);
3130 else
3131 first->des2 = cpu_to_le32(des);
3132
3133 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3134 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3135
3136 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3137 priv->hwts_tx_en)) {
3138 /* declare that device is doing timestamping */
3139 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3140 priv->hw->desc->enable_tx_timestamp(first);
3141 }
3142
3143 /* Prepare the first descriptor setting the OWN bit too */
3144 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3145 csum_insertion, priv->mode, 1,
3146 last_segment, skb->len);
3147
3148 /* The own bit must be the latest setting done when prepare the
3149 * descriptor and then barrier is needed to make sure that
3150 * all is coherent before granting the DMA engine.
3151 */
3152 dma_wmb();
3153 }
3154
3155 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3156
3157 if (priv->synopsys_id < DWMAC_CORE_4_00)
3158 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3159 else
3160 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3161 queue);
3162
3163 return NETDEV_TX_OK;
3164
3165 dma_map_err:
3166 netdev_err(priv->dev, "Tx DMA map failed\n");
3167 dev_kfree_skb(skb);
3168 priv->dev->stats.tx_dropped++;
3169 return NETDEV_TX_OK;
3170 }
3171
3172 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3173 {
3174 struct ethhdr *ehdr;
3175 u16 vlanid;
3176
3177 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3178 NETIF_F_HW_VLAN_CTAG_RX &&
3179 !__vlan_get_tag(skb, &vlanid)) {
3180 /* pop the vlan tag */
3181 ehdr = (struct ethhdr *)skb->data;
3182 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3183 skb_pull(skb, VLAN_HLEN);
3184 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3185 }
3186 }
3187
3188
3189 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3190 {
3191 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3192 return 0;
3193
3194 return 1;
3195 }
3196
3197 /**
3198 * stmmac_rx_refill - refill used skb preallocated buffers
3199 * @priv: driver private structure
3200 * @queue: RX queue index
3201 * Description : this reallocates the skbs for the reception process,
3202 * which is based on zero-copy.
3203 */
3204 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3205 {
3206 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3207 int dirty = stmmac_rx_dirty(priv, queue);
3208 unsigned int entry = rx_q->dirty_rx;
3209
3210 int bfsize = priv->dma_buf_sz;
3211
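/* Rearm every descriptor the DMA has already used with a freshly mapped skb */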
3212 while (dirty-- > 0) {
3213 struct dma_desc *p;
3214
3215 if (priv->extend_desc)
3216 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3217 else
3218 p = rx_q->dma_rx + entry;
3219
3220 if (likely(!rx_q->rx_skbuff[entry])) {
3221 struct sk_buff *skb;
3222
3223 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3224 if (unlikely(!skb)) {
3225 /* so for a while no zero-copy! */
3226 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3227 if (unlikely(net_ratelimit()))
3228 dev_err(priv->device,
3229 "fail to alloc skb entry %d\n",
3230 entry);
3231 break;
3232 }
3233
3234 rx_q->rx_skbuff[entry] = skb;
3235 rx_q->rx_skbuff_dma[entry] =
3236 dma_map_single(priv->device, skb->data, bfsize,
3237 DMA_FROM_DEVICE);
3238 if (dma_mapping_error(priv->device,
3239 rx_q->rx_skbuff_dma[entry])) {
3240 netdev_err(priv->dev, "Rx DMA map failed\n");
3241 dev_kfree_skb(skb);
3242 break;
3243 }
3244
3245 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3246 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3247 p->des1 = 0;
3248 } else {
3249 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3250 }
3251 if (priv->hw->mode->refill_desc3)
3252 priv->hw->mode->refill_desc3(rx_q, p);
3253
3254 if (rx_q->rx_zeroc_thresh > 0)
3255 rx_q->rx_zeroc_thresh--;
3256
3257 netif_dbg(priv, rx_status, priv->dev,
3258 "refill entry #%d\n", entry);
3259 }
3260 dma_wmb();
3261
3262 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3263 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3264 else
3265 priv->hw->desc->set_rx_owner(p);
3266
3267 dma_wmb();
3268
3269 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3270 }
3271 rx_q->dirty_rx = entry;
3272 }
3273
3274 /**
3275 * stmmac_rx - manage the receive process
3276 * @priv: driver private structure
3277 * @limit: napi budget
3278 * @queue: RX queue index.
3279 * Description : this is the function called by the napi poll method.
3280 * It gets all the frames inside the ring.
3281 */
3282 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3283 {
3284 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 unsigned int entry = rx_q->cur_rx;
3286 int coe = priv->hw->rx_csum;
3287 unsigned int next_entry;
3288 unsigned int count = 0;
3289
3290 if (netif_msg_rx_status(priv)) {
3291 void *rx_head;
3292
3293 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3294 if (priv->extend_desc)
3295 rx_head = (void *)rx_q->dma_erx;
3296 else
3297 rx_head = (void *)rx_q->dma_rx;
3298
3299 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3300 }
3301 while (count < limit) {
3302 int status;
3303 struct dma_desc *p;
3304 struct dma_desc *np;
3305
3306 if (priv->extend_desc)
3307 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3308 else
3309 p = rx_q->dma_rx + entry;
3310
3311 /* read the status of the incoming frame */
3312 status = priv->hw->desc->rx_status(&priv->dev->stats,
3313 &priv->xstats, p);
3314 /* check if managed by the DMA otherwise go ahead */
3315 if (unlikely(status & dma_own))
3316 break;
3317
3318 count++;
3319
3320 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3321 next_entry = rx_q->cur_rx;
3322
3323 if (priv->extend_desc)
3324 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3325 else
3326 np = rx_q->dma_rx + next_entry;
3327
3328 prefetch(np);
3329
3330 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3331 priv->hw->desc->rx_extended_status(&priv->dev->stats,
3332 &priv->xstats,
3333 rx_q->dma_erx +
3334 entry);
3335 if (unlikely(status == discard_frame)) {
3336 priv->dev->stats.rx_errors++;
3337 if (priv->hwts_rx_en && !priv->extend_desc) {
3338 /* DESC2 & DESC3 will be overwritten by device
3339 * with timestamp value, hence reinitialize
3340 * them in stmmac_rx_refill() function so that
3341 * device can reuse it.
3342 */
3343 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3344 rx_q->rx_skbuff[entry] = NULL;
3345 dma_unmap_single(priv->device,
3346 rx_q->rx_skbuff_dma[entry],
3347 priv->dma_buf_sz,
3348 DMA_FROM_DEVICE);
3349 }
3350 } else {
3351 struct sk_buff *skb;
3352 int frame_len;
3353 unsigned int des;
3354
3355 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3356 des = le32_to_cpu(p->des0);
3357 else
3358 des = le32_to_cpu(p->des2);
3359
3360 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3361
3362 /* If frame length is greater than skb buffer size
3363 * (preallocated during init) then the packet is
3364 * ignored
3365 */
3366 if (frame_len > priv->dma_buf_sz) {
3367 netdev_err(priv->dev,
3368 "len %d larger than size (%d)\n",
3369 frame_len, priv->dma_buf_sz);
3370 priv->dev->stats.rx_length_errors++;
3371 break;
3372 }
3373
3374 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3375 * Type frames (LLC/LLC-SNAP)
3376 */
3377 if (unlikely(status != llc_snap))
3378 frame_len -= ETH_FCS_LEN;
3379
3380 if (netif_msg_rx_status(priv)) {
3381 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3382 p, entry, des);
3383 if (frame_len > ETH_FRAME_LEN)
3384 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3385 frame_len, status);
3386 }
3387
3388 /* Zero-copy is always used for all sizes
3389 * in case of GMAC4 because it always needs
3390 * to refill the used descriptors.
3391 */
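/* For small frames (below rx_copybreak) the data is copied into a
* new skb so that the preallocated buffer can be reused without
* remapping it.
*/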
3392 if (unlikely(!priv->plat->has_gmac4 &&
3393 ((frame_len < priv->rx_copybreak) ||
3394 stmmac_rx_threshold_count(rx_q)))) {
3395 skb = netdev_alloc_skb_ip_align(priv->dev,
3396 frame_len);
3397 if (unlikely(!skb)) {
3398 if (net_ratelimit())
3399 dev_warn(priv->device,
3400 "packet dropped\n");
3401 priv->dev->stats.rx_dropped++;
3402 break;
3403 }
3404
3405 dma_sync_single_for_cpu(priv->device,
3406 rx_q->rx_skbuff_dma
3407 [entry], frame_len,
3408 DMA_FROM_DEVICE);
3409 skb_copy_to_linear_data(skb,
3410 rx_q->
3411 rx_skbuff[entry]->data,
3412 frame_len);
3413
3414 skb_put(skb, frame_len);
3415 dma_sync_single_for_device(priv->device,
3416 rx_q->rx_skbuff_dma
3417 [entry], frame_len,
3418 DMA_FROM_DEVICE);
3419 } else {
3420 skb = rx_q->rx_skbuff[entry];
3421 if (unlikely(!skb)) {
3422 netdev_err(priv->dev,
3423 "%s: Inconsistent Rx chain\n",
3424 priv->dev->name);
3425 priv->dev->stats.rx_dropped++;
3426 break;
3427 }
3428 prefetch(skb->data - NET_IP_ALIGN);
3429 rx_q->rx_skbuff[entry] = NULL;
3430 rx_q->rx_zeroc_thresh++;
3431
3432 skb_put(skb, frame_len);
3433 dma_unmap_single(priv->device,
3434 rx_q->rx_skbuff_dma[entry],
3435 priv->dma_buf_sz,
3436 DMA_FROM_DEVICE);
3437 }
3438
3439 if (netif_msg_pktdata(priv)) {
3440 netdev_dbg(priv->dev, "frame received (%dbytes)",
3441 frame_len);
3442 print_pkt(skb->data, frame_len);
3443 }
3444
3445 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3446
3447 stmmac_rx_vlan(priv->dev, skb);
3448
3449 skb->protocol = eth_type_trans(skb, priv->dev);
3450
3451 if (unlikely(!coe))
3452 skb_checksum_none_assert(skb);
3453 else
3454 skb->ip_summed = CHECKSUM_UNNECESSARY;
3455
3456 napi_gro_receive(&rx_q->napi, skb);
3457
3458 priv->dev->stats.rx_packets++;
3459 priv->dev->stats.rx_bytes += frame_len;
3460 }
3461 entry = next_entry;
3462 }
3463
3464 stmmac_rx_refill(priv, queue);
3465
3466 priv->xstats.rx_pkt_n += count;
3467
3468 return count;
3469 }
3470
3471 /**
3472 * stmmac_poll - stmmac poll method (NAPI)
3473 * @napi : pointer to the napi structure.
3474 * @budget : maximum number of packets that the current CPU can receive from
3475 * all interfaces.
3476 * Description :
3477 * To look at the incoming frames and clear the tx resources.
3478 */
3479 static int stmmac_poll(struct napi_struct *napi, int budget)
3480 {
3481 struct stmmac_rx_queue *rx_q =
3482 container_of(napi, struct stmmac_rx_queue, napi);
3483 struct stmmac_priv *priv = rx_q->priv_data;
3484 u32 tx_count = priv->plat->tx_queues_to_use;
3485 u32 chan = rx_q->queue_index;
3486 int work_done = 0;
3487 u32 queue;
3488
3489 priv->xstats.napi_poll++;
3490
3491 /* check all the queues */
3492 for (queue = 0; queue < tx_count; queue++)
3493 stmmac_tx_clean(priv, queue);
3494
3495 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
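/* If the budget was not exhausted, leave polling mode and re-enable the per-channel DMA interrupt */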
3496 if (work_done < budget) {
3497 napi_complete_done(napi, work_done);
3498 stmmac_enable_dma_irq(priv, chan);
3499 }
3500 return work_done;
3501 }
3502
3503 /**
3504 * stmmac_tx_timeout
3505 * @dev : Pointer to net device structure
3506 * Description: this function is called when a packet transmission fails to
3507 * complete within a reasonable time. The driver will mark the error in the
3508 * netdev structure and arrange for the device to be reset to a sane state
3509 * in order to transmit a new packet.
3510 */
3511 static void stmmac_tx_timeout(struct net_device *dev)
3512 {
3513 struct stmmac_priv *priv = netdev_priv(dev);
3514 u32 tx_count = priv->plat->tx_queues_to_use;
3515 u32 chan;
3516
3517 /* Clear Tx resources and restart transmitting again */
3518 for (chan = 0; chan < tx_count; chan++)
3519 stmmac_tx_err(priv, chan);
3520 }
3521
3522 /**
3523 * stmmac_set_rx_mode - entry point for multicast addressing
3524 * @dev : pointer to the device structure
3525 * Description:
3526 * This function is a driver entry point which gets called by the kernel
3527 * whenever multicast addresses must be enabled/disabled.
3528 * Return value:
3529 * void.
3530 */
3531 static void stmmac_set_rx_mode(struct net_device *dev)
3532 {
3533 struct stmmac_priv *priv = netdev_priv(dev);
3534
3535 priv->hw->mac->set_filter(priv->hw, dev);
3536 }
3537
3538 /**
3539 * stmmac_change_mtu - entry point to change MTU size for the device.
3540 * @dev : device pointer.
3541 * @new_mtu : the new MTU size for the device.
3542 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
3543 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3544 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3545 * Return value:
3546 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3547 * file on failure.
3548 */
3549 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3550 {
3551 struct stmmac_priv *priv = netdev_priv(dev);
3552
3553 if (netif_running(dev)) {
3554 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3555 return -EBUSY;
3556 }
3557
3558 dev->mtu = new_mtu;
3559
3560 netdev_update_features(dev);
3561
3562 return 0;
3563 }
3564
3565 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3566 netdev_features_t features)
3567 {
3568 struct stmmac_priv *priv = netdev_priv(dev);
3569
3570 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3571 features &= ~NETIF_F_RXCSUM;
3572
3573 if (!priv->plat->tx_coe)
3574 features &= ~NETIF_F_CSUM_MASK;
3575
3576 /* Some GMAC devices have a bugged Jumbo frame support that
3577 * needs to have the Tx COE disabled for oversized frames
3578 * (due to limited buffer sizes). In this case we disable
3579 * the TX csum insertion in the TDES and not use SF.
3580 */
3581 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3582 features &= ~NETIF_F_CSUM_MASK;
3583
3584 /* Disable tso if asked by ethtool */
3585 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3586 if (features & NETIF_F_TSO)
3587 priv->tso = true;
3588 else
3589 priv->tso = false;
3590 }
3591
3592 return features;
3593 }
3594
3595 static int stmmac_set_features(struct net_device *netdev,
3596 netdev_features_t features)
3597 {
3598 struct stmmac_priv *priv = netdev_priv(netdev);
3599
3600 /* Keep the COE Type in case csum is supported */
3601 if (features & NETIF_F_RXCSUM)
3602 priv->hw->rx_csum = priv->plat->rx_coe;
3603 else
3604 priv->hw->rx_csum = 0;
3605 /* No check needed because rx_coe has been set before and it will be
3606 * fixed in case of issue.
3607 */
3608 priv->hw->mac->rx_ipc(priv->hw);
3609
3610 return 0;
3611 }
3612
3613 /**
3614 * stmmac_interrupt - main ISR
3615 * @irq: interrupt number.
3616 * @dev_id: to pass the net device pointer.
3617 * Description: this is the main driver interrupt service routine.
3618 * It can call:
3619 * o DMA service routine (to manage incoming frame reception and transmission
3620 * status)
3621 * o Core interrupts to manage: remote wake-up, management counter, LPI
3622 * interrupts.
3623 */
3624 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3625 {
3626 struct net_device *dev = (struct net_device *)dev_id;
3627 struct stmmac_priv *priv = netdev_priv(dev);
3628 u32 rx_cnt = priv->plat->rx_queues_to_use;
3629 u32 tx_cnt = priv->plat->tx_queues_to_use;
3630 u32 queues_count;
3631 u32 queue;
3632
3633 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3634
3635 if (priv->irq_wake)
3636 pm_wakeup_event(priv->device, 0);
3637
3638 if (unlikely(!dev)) {
3639 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3640 return IRQ_NONE;
3641 }
3642
3643 /* To handle GMAC own interrupts */
3644 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3645 int status = priv->hw->mac->host_irq_status(priv->hw,
3646 &priv->xstats);
3647
3648 if (unlikely(status)) {
3649 /* For LPI we need to save the tx status */
3650 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3651 priv->tx_path_in_lpi_mode = true;
3652 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3653 priv->tx_path_in_lpi_mode = false;
3654 }
3655
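/* Handle the per-queue MTL interrupts; on RX overflow, reload the RX tail pointer so the DMA can resume reception */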
3656 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3657 for (queue = 0; queue < queues_count; queue++) {
3658 struct stmmac_rx_queue *rx_q =
3659 &priv->rx_queue[queue];
3660
3661 status |=
3662 priv->hw->mac->host_mtl_irq_status(priv->hw,
3663 queue);
3664
3665 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3666 priv->hw->dma->set_rx_tail_ptr)
3667 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3668 rx_q->rx_tail_addr,
3669 queue);
3670 }
3671 }
3672
3673 /* PCS link status */
3674 if (priv->hw->pcs) {
3675 if (priv->xstats.pcs_link)
3676 netif_carrier_on(dev);
3677 else
3678 netif_carrier_off(dev);
3679 }
3680 }
3681
3682 /* To handle DMA interrupts */
3683 stmmac_dma_interrupt(priv);
3684
3685 return IRQ_HANDLED;
3686 }
3687
3688 #ifdef CONFIG_NET_POLL_CONTROLLER
3689 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3690 * to allow network I/O with interrupts disabled.
3691 */
3692 static void stmmac_poll_controller(struct net_device *dev)
3693 {
3694 disable_irq(dev->irq);
3695 stmmac_interrupt(dev->irq, dev);
3696 enable_irq(dev->irq);
3697 }
3698 #endif
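/* This path is exercised, for instance, by netconsole (enabled through the
 * netconsole= boot/module parameter), which uses netpoll to transmit log
 * messages with interrupts disabled.
 */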
3699
3700 /**
3701 * stmmac_ioctl - Entry point for the Ioctl
3702 * @dev: Device pointer.
3703  * @rq: An IOCTL-specific structure that can contain a pointer to
3704 * a proprietary structure used to pass information to the driver.
3705 * @cmd: IOCTL command
3706 * Description:
3707 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3708 */
3709 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3710 {
3711 int ret = -EOPNOTSUPP;
3712
3713 if (!netif_running(dev))
3714 return -EINVAL;
3715
3716 switch (cmd) {
3717 case SIOCGMIIPHY:
3718 case SIOCGMIIREG:
3719 case SIOCSMIIREG:
3720 if (!dev->phydev)
3721 return -EINVAL;
3722 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3723 break;
3724 case SIOCSHWTSTAMP:
3725 ret = stmmac_hwtstamp_ioctl(dev, rq);
3726 break;
3727 default:
3728 break;
3729 }
3730
3731 return ret;
3732 }
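/* Typical user-space entry points for the ioctls above (illustrative,
 * assuming an interface named eth0): "mii-tool eth0" issues SIOCGMIIPHY/
 * SIOCGMIIREG, and "hwstamp_ctl -i eth0 -t 1 -r 1" (from linuxptp) issues
 * SIOCSHWTSTAMP to enable HW time stamping.
 */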
3733
3734 #ifdef CONFIG_DEBUG_FS
3735 static struct dentry *stmmac_fs_dir;
3736
3737 static void sysfs_display_ring(void *head, int size, int extend_desc,
3738 struct seq_file *seq)
3739 {
3740 int i;
3741 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3742 struct dma_desc *p = (struct dma_desc *)head;
3743
3744 for (i = 0; i < size; i++) {
3745 if (extend_desc) {
3746 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3747 i, (unsigned int)virt_to_phys(ep),
3748 le32_to_cpu(ep->basic.des0),
3749 le32_to_cpu(ep->basic.des1),
3750 le32_to_cpu(ep->basic.des2),
3751 le32_to_cpu(ep->basic.des3));
3752 ep++;
3753 } else {
3754 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3755 i, (unsigned int)virt_to_phys(p),
3756 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3757 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3758 p++;
3759 }
3760 seq_printf(seq, "\n");
3761 }
3762 }
3763
3764 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3765 {
3766 struct net_device *dev = seq->private;
3767 struct stmmac_priv *priv = netdev_priv(dev);
3768 u32 rx_count = priv->plat->rx_queues_to_use;
3769 u32 tx_count = priv->plat->tx_queues_to_use;
3770 u32 queue;
3771
3772 for (queue = 0; queue < rx_count; queue++) {
3773 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3774
3775 seq_printf(seq, "RX Queue %d:\n", queue);
3776
3777 if (priv->extend_desc) {
3778 seq_printf(seq, "Extended descriptor ring:\n");
3779 sysfs_display_ring((void *)rx_q->dma_erx,
3780 DMA_RX_SIZE, 1, seq);
3781 } else {
3782 seq_printf(seq, "Descriptor ring:\n");
3783 sysfs_display_ring((void *)rx_q->dma_rx,
3784 DMA_RX_SIZE, 0, seq);
3785 }
3786 }
3787
3788 for (queue = 0; queue < tx_count; queue++) {
3789 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3790
3791 seq_printf(seq, "TX Queue %d:\n", queue);
3792
3793 if (priv->extend_desc) {
3794 seq_printf(seq, "Extended descriptor ring:\n");
3795 sysfs_display_ring((void *)tx_q->dma_etx,
3796 DMA_TX_SIZE, 1, seq);
3797 } else {
3798 seq_printf(seq, "Descriptor ring:\n");
3799 sysfs_display_ring((void *)tx_q->dma_tx,
3800 DMA_TX_SIZE, 0, seq);
3801 }
3802 }
3803
3804 return 0;
3805 }
3806
3807 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3808 {
3809 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3810 }
3811
3812 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3813
3814 static const struct file_operations stmmac_rings_status_fops = {
3815 .owner = THIS_MODULE,
3816 .open = stmmac_sysfs_ring_open,
3817 .read = seq_read,
3818 .llseek = seq_lseek,
3819 .release = single_release,
3820 };
3821
3822 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3823 {
3824 struct net_device *dev = seq->private;
3825 struct stmmac_priv *priv = netdev_priv(dev);
3826
3827 if (!priv->hw_cap_support) {
3828 seq_printf(seq, "DMA HW features not supported\n");
3829 return 0;
3830 }
3831
3832 seq_printf(seq, "==============================\n");
3833 seq_printf(seq, "\tDMA HW features\n");
3834 seq_printf(seq, "==============================\n");
3835
3836 seq_printf(seq, "\t10/100 Mbps: %s\n",
3837 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3838 seq_printf(seq, "\t1000 Mbps: %s\n",
3839 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3840 seq_printf(seq, "\tHalf duplex: %s\n",
3841 (priv->dma_cap.half_duplex) ? "Y" : "N");
3842 seq_printf(seq, "\tHash Filter: %s\n",
3843 (priv->dma_cap.hash_filter) ? "Y" : "N");
3844 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3845 (priv->dma_cap.multi_addr) ? "Y" : "N");
3846 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3847 (priv->dma_cap.pcs) ? "Y" : "N");
3848 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3849 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3850 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3851 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3852 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3853 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3854 seq_printf(seq, "\tRMON module: %s\n",
3855 (priv->dma_cap.rmon) ? "Y" : "N");
3856 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3857 (priv->dma_cap.time_stamp) ? "Y" : "N");
3858 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3859 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3860 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3861 (priv->dma_cap.eee) ? "Y" : "N");
3862 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3863 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3864 (priv->dma_cap.tx_coe) ? "Y" : "N");
3865 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3866 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3867 (priv->dma_cap.rx_coe) ? "Y" : "N");
3868 } else {
3869 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3870 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3871 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3872 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3873 }
3874 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3875 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3876 	seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3877 		   priv->dma_cap.number_rx_channel);
3878 	seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3879 		   priv->dma_cap.number_tx_channel);
3880 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3881 (priv->dma_cap.enh_desc) ? "Y" : "N");
3882
3883 return 0;
3884 }
3885
3886 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3887 {
3888 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3889 }
3890
3891 static const struct file_operations stmmac_dma_cap_fops = {
3892 .owner = THIS_MODULE,
3893 .open = stmmac_sysfs_dma_cap_open,
3894 .read = seq_read,
3895 .llseek = seq_lseek,
3896 .release = single_release,
3897 };
3898
3899 static int stmmac_init_fs(struct net_device *dev)
3900 {
3901 struct stmmac_priv *priv = netdev_priv(dev);
3902
3903 /* Create per netdev entries */
3904 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3905
3906 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3907 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3908
3909 return -ENOMEM;
3910 }
3911
3912 /* Entry to report DMA RX/TX rings */
3913 priv->dbgfs_rings_status =
3914 debugfs_create_file("descriptors_status", S_IRUGO,
3915 priv->dbgfs_dir, dev,
3916 &stmmac_rings_status_fops);
3917
3918 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3919 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3920 debugfs_remove_recursive(priv->dbgfs_dir);
3921
3922 return -ENOMEM;
3923 }
3924
3925 /* Entry to report the DMA HW features */
3926 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3927 priv->dbgfs_dir,
3928 dev, &stmmac_dma_cap_fops);
3929
3930 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3931 		netdev_err(priv->dev, "ERROR creating stmmac DMA capability debugfs file\n");
3932 debugfs_remove_recursive(priv->dbgfs_dir);
3933
3934 return -ENOMEM;
3935 }
3936
3937 return 0;
3938 }
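/* Once created, the entries above can be inspected from user space, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug and the interface is
 * named eth0):
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */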
3939
3940 static void stmmac_exit_fs(struct net_device *dev)
3941 {
3942 struct stmmac_priv *priv = netdev_priv(dev);
3943
3944 debugfs_remove_recursive(priv->dbgfs_dir);
3945 }
3946 #endif /* CONFIG_DEBUG_FS */
3947
3948 static const struct net_device_ops stmmac_netdev_ops = {
3949 .ndo_open = stmmac_open,
3950 .ndo_start_xmit = stmmac_xmit,
3951 .ndo_stop = stmmac_release,
3952 .ndo_change_mtu = stmmac_change_mtu,
3953 .ndo_fix_features = stmmac_fix_features,
3954 .ndo_set_features = stmmac_set_features,
3955 .ndo_set_rx_mode = stmmac_set_rx_mode,
3956 .ndo_tx_timeout = stmmac_tx_timeout,
3957 .ndo_do_ioctl = stmmac_ioctl,
3958 #ifdef CONFIG_NET_POLL_CONTROLLER
3959 .ndo_poll_controller = stmmac_poll_controller,
3960 #endif
3961 .ndo_set_mac_address = eth_mac_addr,
3962 };
3963
3964 /**
3965 * stmmac_hw_init - Init the MAC device
3966 * @priv: driver private structure
3967 * Description: this function is to configure the MAC device according to
3968 * some platform parameters or the HW capability register. It prepares the
3969 * driver to use either ring or chain modes and to setup either enhanced or
3970 * normal descriptors.
3971 */
3972 static int stmmac_hw_init(struct stmmac_priv *priv)
3973 {
3974 struct mac_device_info *mac;
3975
3976 /* Identify the MAC HW device */
3977 if (priv->plat->setup) {
3978 mac = priv->plat->setup(priv);
3979 } else if (priv->plat->has_gmac) {
3980 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3981 mac = dwmac1000_setup(priv->ioaddr,
3982 priv->plat->multicast_filter_bins,
3983 priv->plat->unicast_filter_entries,
3984 &priv->synopsys_id);
3985 } else if (priv->plat->has_gmac4) {
3986 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3987 mac = dwmac4_setup(priv->ioaddr,
3988 priv->plat->multicast_filter_bins,
3989 priv->plat->unicast_filter_entries,
3990 &priv->synopsys_id);
3991 } else {
3992 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3993 }
3994 if (!mac)
3995 return -ENOMEM;
3996
3997 priv->hw = mac;
3998
3999 	/* dwmac-sun8i only works in chain mode */
4000 if (priv->plat->has_sun8i)
4001 chain_mode = 1;
4002
4003 /* To use the chained or ring mode */
4004 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4005 priv->hw->mode = &dwmac4_ring_mode_ops;
4006 } else {
4007 if (chain_mode) {
4008 priv->hw->mode = &chain_mode_ops;
4009 dev_info(priv->device, "Chain mode enabled\n");
4010 priv->mode = STMMAC_CHAIN_MODE;
4011 } else {
4012 priv->hw->mode = &ring_mode_ops;
4013 dev_info(priv->device, "Ring mode enabled\n");
4014 priv->mode = STMMAC_RING_MODE;
4015 }
4016 }
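	/* Note: chain mode can also be requested explicitly via the chain_mode
	 * module parameter or the "stmmaceth=chain_mode:1" boot option (see
	 * stmmac_cmdline_opt() below); cores >= 4.00 always use the ring ops
	 * selected above.
	 */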
4017
4018 	/* Get the HW capabilities (GMAC cores newer than 3.50a) */
4019 priv->hw_cap_support = stmmac_get_hw_features(priv);
4020 if (priv->hw_cap_support) {
4021 dev_info(priv->device, "DMA HW capability register supported\n");
4022
4023 		/* We can override some GMAC/DMA configuration fields
4024 		 * (e.g. enh_desc, tx_coe) that are passed through the
4025 		 * platform data with the values from the HW capability
4026 		 * register (if supported).
4027 		 */
4028 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4029 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4030 priv->hw->pmt = priv->plat->pmt;
4031
4032 		/* TX COE doesn't work in threshold DMA mode */
4033 if (priv->plat->force_thresh_dma_mode)
4034 priv->plat->tx_coe = 0;
4035 else
4036 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4037
4038 /* In case of GMAC4 rx_coe is from HW cap register. */
4039 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4040
4041 if (priv->dma_cap.rx_coe_type2)
4042 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4043 else if (priv->dma_cap.rx_coe_type1)
4044 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4045
4046 } else {
4047 dev_info(priv->device, "No HW DMA feature register supported\n");
4048 }
4049
4050 /* To use alternate (extended), normal or GMAC4 descriptor structures */
4051 if (priv->synopsys_id >= DWMAC_CORE_4_00)
4052 priv->hw->desc = &dwmac4_desc_ops;
4053 else
4054 stmmac_selec_desc_mode(priv);
4055
4056 if (priv->plat->rx_coe) {
4057 priv->hw->rx_csum = priv->plat->rx_coe;
4058 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4059 if (priv->synopsys_id < DWMAC_CORE_4_00)
4060 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4061 }
4062 if (priv->plat->tx_coe)
4063 dev_info(priv->device, "TX Checksum insertion supported\n");
4064
4065 if (priv->plat->pmt) {
4066 		dev_info(priv->device, "Wake-Up On LAN supported\n");
4067 device_set_wakeup_capable(priv->device, 1);
4068 }
4069
4070 if (priv->dma_cap.tsoen)
4071 dev_info(priv->device, "TSO supported\n");
4072
4073 return 0;
4074 }
4075
4076 /**
4077 * stmmac_dvr_probe
4078 * @device: device pointer
4079 * @plat_dat: platform data pointer
4080 * @res: stmmac resource pointer
4081  * Description: this is the main probe function: it allocates the net_device
4082  * and the driver private structure, and initializes the hardware.
4083 * Return:
4084 * returns 0 on success, otherwise errno.
4085 */
4086 int stmmac_dvr_probe(struct device *device,
4087 struct plat_stmmacenet_data *plat_dat,
4088 struct stmmac_resources *res)
4089 {
4090 struct net_device *ndev = NULL;
4091 struct stmmac_priv *priv;
4092 int ret = 0;
4093 u32 queue;
4094
4095 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4096 MTL_MAX_TX_QUEUES,
4097 MTL_MAX_RX_QUEUES);
4098 if (!ndev)
4099 return -ENOMEM;
4100
4101 SET_NETDEV_DEV(ndev, device);
4102
4103 priv = netdev_priv(ndev);
4104 priv->device = device;
4105 priv->dev = ndev;
4106
4107 stmmac_set_ethtool_ops(ndev);
4108 priv->pause = pause;
4109 priv->plat = plat_dat;
4110 priv->ioaddr = res->addr;
4111 priv->dev->base_addr = (unsigned long)res->addr;
4112
4113 priv->dev->irq = res->irq;
4114 priv->wol_irq = res->wol_irq;
4115 priv->lpi_irq = res->lpi_irq;
4116
4117 if (res->mac)
4118 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4119
4120 dev_set_drvdata(device, priv->dev);
4121
4122 /* Verify driver arguments */
4123 stmmac_verify_args();
4124
4125 /* Override with kernel parameters if supplied XXX CRS XXX
4126 * this needs to have multiple instances
4127 */
4128 if ((phyaddr >= 0) && (phyaddr <= 31))
4129 priv->plat->phy_addr = phyaddr;
4130
4131 if (priv->plat->stmmac_rst) {
4132 ret = reset_control_assert(priv->plat->stmmac_rst);
4133 reset_control_deassert(priv->plat->stmmac_rst);
4134 		/* Some reset controllers have only a reset callback instead
4135 		 * of an assert + deassert callback pair.
4136 		 */
4137 if (ret == -ENOTSUPP)
4138 reset_control_reset(priv->plat->stmmac_rst);
4139 }
4140
4141 /* Init MAC and get the capabilities */
4142 ret = stmmac_hw_init(priv);
4143 if (ret)
4144 goto error_hw_init;
4145
4146 /* Configure real RX and TX queues */
4147 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4148 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4149
4150 ndev->netdev_ops = &stmmac_netdev_ops;
4151
4152 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4153 NETIF_F_RXCSUM;
4154
4155 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4156 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4157 priv->tso = true;
4158 dev_info(priv->device, "TSO feature enabled\n");
4159 }
4160 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4161 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4162 #ifdef STMMAC_VLAN_TAG_USED
4163 /* Both mac100 and gmac support receive VLAN tag detection */
4164 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4165 #endif
4166 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4167
4168 /* MTU range: 46 - hw-specific max */
4169 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4170 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4171 ndev->max_mtu = JUMBO_LEN;
4172 else
4173 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4174 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4175 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4176 	 */
4177 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4178 (priv->plat->maxmtu >= ndev->min_mtu))
4179 ndev->max_mtu = priv->plat->maxmtu;
4180 else if (priv->plat->maxmtu < ndev->min_mtu)
4181 dev_warn(priv->device,
4182 			 "%s: warning: maxmtu has an invalid value (%d)\n",
4183 __func__, priv->plat->maxmtu);
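	/* Illustrative example of the clamping above: with enhanced descriptors
	 * max_mtu starts at JUMBO_LEN, so a platform that sets plat->maxmtu to
	 * 1500 shrinks the advertised range to 46..1500, while an out-of-range
	 * value such as 40 is ignored and only triggers the warning.
	 */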
4184
4185 if (flow_ctrl)
4186 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4187
4188 	/* Rx Watchdog is available in cores newer than 3.40.
4189 	 * In some cases, for example on buggy HW, this feature
4190 	 * has to be disabled, which can be done by setting the
4191 	 * riwt_off field in the platform data.
4192 	 */
4193 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4194 priv->use_riwt = 1;
4195 dev_info(priv->device,
4196 "Enable RX Mitigation via HW Watchdog Timer\n");
4197 }
4198
4199 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4200 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4201
4202 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4203 (8 * priv->plat->rx_queues_to_use));
4204 }
4205
4206 spin_lock_init(&priv->lock);
4207
4208 	/* If a specific clk_csr value is passed from the platform,
4209 	 * the CSR Clock Range selection cannot be changed at
4210 	 * run-time and is fixed. Otherwise the driver will try to
4211 	 * set the MDC clock dynamically according to the actual
4212 	 * CSR clock input.
4213 	 */
4214 if (!priv->plat->clk_csr)
4215 stmmac_clk_csr_set(priv);
4216 else
4217 priv->clk_csr = priv->plat->clk_csr;
4218
4219 stmmac_check_pcs_mode(priv);
4220
4221 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4222 priv->hw->pcs != STMMAC_PCS_TBI &&
4223 priv->hw->pcs != STMMAC_PCS_RTBI) {
4224 /* MDIO bus Registration */
4225 ret = stmmac_mdio_register(ndev);
4226 if (ret < 0) {
4227 dev_err(priv->device,
4228 "%s: MDIO bus (id: %d) registration failed",
4229 __func__, priv->plat->bus_id);
4230 goto error_mdio_register;
4231 }
4232 }
4233
4234 ret = register_netdev(ndev);
4235 if (ret) {
4236 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4237 __func__, ret);
4238 goto error_netdev_register;
4239 }
4240
4241 return ret;
4242
4243 error_netdev_register:
4244 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4245 priv->hw->pcs != STMMAC_PCS_TBI &&
4246 priv->hw->pcs != STMMAC_PCS_RTBI)
4247 stmmac_mdio_unregister(ndev);
4248 error_mdio_register:
4249 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4250 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4251
4252 netif_napi_del(&rx_q->napi);
4253 }
4254 error_hw_init:
4255 free_netdev(ndev);
4256
4257 return ret;
4258 }
4259 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
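/* stmmac_dvr_probe() is not called directly by the networking core; the bus
 * glue (e.g. the stmmac platform/dwmac-* drivers or stmmac_pci) fills in
 * plat_stmmacenet_data and stmmac_resources and then invokes it.
 */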
4260
4261 /**
4262 * stmmac_dvr_remove
4263 * @dev: device pointer
4264  * Description: this function resets the TX/RX processes, disables the MAC
4265  * RX/TX, changes the link status and releases the DMA descriptor rings.
4266 */
4267 int stmmac_dvr_remove(struct device *dev)
4268 {
4269 struct net_device *ndev = dev_get_drvdata(dev);
4270 struct stmmac_priv *priv = netdev_priv(ndev);
4271
4272 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
4273
4274 stmmac_stop_all_dma(priv);
4275
4276 priv->hw->mac->set_mac(priv->ioaddr, false);
4277 netif_carrier_off(ndev);
4278 unregister_netdev(ndev);
4279 if (priv->plat->stmmac_rst)
4280 reset_control_assert(priv->plat->stmmac_rst);
4281 clk_disable_unprepare(priv->plat->pclk);
4282 clk_disable_unprepare(priv->plat->stmmac_clk);
4283 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4284 priv->hw->pcs != STMMAC_PCS_TBI &&
4285 priv->hw->pcs != STMMAC_PCS_RTBI)
4286 stmmac_mdio_unregister(ndev);
4287 free_netdev(ndev);
4288
4289 return 0;
4290 }
4291 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4292
4293 /**
4294 * stmmac_suspend - suspend callback
4295 * @dev: device pointer
4296  * Description: this function suspends the device; it is called by the
4297  * platform driver to stop the network queues, program the PMT register
4298  * (for WoL) and clean up and release the driver resources.
4299 */
4300 int stmmac_suspend(struct device *dev)
4301 {
4302 struct net_device *ndev = dev_get_drvdata(dev);
4303 struct stmmac_priv *priv = netdev_priv(ndev);
4304 unsigned long flags;
4305
4306 if (!ndev || !netif_running(ndev))
4307 return 0;
4308
4309 if (ndev->phydev)
4310 phy_stop(ndev->phydev);
4311
4312 spin_lock_irqsave(&priv->lock, flags);
4313
4314 netif_device_detach(ndev);
4315 stmmac_stop_all_queues(priv);
4316
4317 stmmac_disable_all_queues(priv);
4318
4319 /* Stop TX/RX DMA */
4320 stmmac_stop_all_dma(priv);
4321
4322 /* Enable Power down mode by programming the PMT regs */
4323 if (device_may_wakeup(priv->device)) {
4324 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4325 priv->irq_wake = 1;
4326 } else {
4327 priv->hw->mac->set_mac(priv->ioaddr, false);
4328 pinctrl_pm_select_sleep_state(priv->device);
4329 		/* Disable clocks since PMT-based wake-up is not used */
4330 clk_disable(priv->plat->pclk);
4331 clk_disable(priv->plat->stmmac_clk);
4332 }
4333 spin_unlock_irqrestore(&priv->lock, flags);
4334
4335 priv->oldlink = false;
4336 priv->speed = SPEED_UNKNOWN;
4337 priv->oldduplex = DUPLEX_UNKNOWN;
4338 return 0;
4339 }
4340 EXPORT_SYMBOL_GPL(stmmac_suspend);
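/* The WoL branch above depends on device_may_wakeup(); this is normally
 * enabled from user space, e.g. "ethtool -s eth0 wol g" (eth0 being a
 * hypothetical interface name) requests magic-packet wake-up, which ends up
 * in priv->wolopts and is programmed into the PMT register here.
 */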
4341
4342 /**
4343 * stmmac_reset_queues_param - reset queue parameters
4344  * @priv: driver private structure
4345 */
4346 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4347 {
4348 u32 rx_cnt = priv->plat->rx_queues_to_use;
4349 u32 tx_cnt = priv->plat->tx_queues_to_use;
4350 u32 queue;
4351
4352 for (queue = 0; queue < rx_cnt; queue++) {
4353 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4354
4355 rx_q->cur_rx = 0;
4356 rx_q->dirty_rx = 0;
4357 }
4358
4359 for (queue = 0; queue < tx_cnt; queue++) {
4360 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4361
4362 tx_q->cur_tx = 0;
4363 tx_q->dirty_tx = 0;
4364 }
4365 }
4366
4367 /**
4368 * stmmac_resume - resume callback
4369 * @dev: device pointer
4370  * Description: on resume this function is invoked to set up the DMA and
4371  * CORE in a usable state.
4372 */
4373 int stmmac_resume(struct device *dev)
4374 {
4375 struct net_device *ndev = dev_get_drvdata(dev);
4376 struct stmmac_priv *priv = netdev_priv(ndev);
4377 unsigned long flags;
4378
4379 if (!netif_running(ndev))
4380 return 0;
4381
4382 	/* The Power Down bit in the PMT register is cleared
4383 	 * automatically as soon as a magic packet or a Wake-up frame
4384 	 * is received. Even so, it is better to clear this bit
4385 	 * manually because it can cause problems when resuming
4386 	 * from another device (e.g. a serial console).
4387 	 */
4388 if (device_may_wakeup(priv->device)) {
4389 spin_lock_irqsave(&priv->lock, flags);
4390 priv->hw->mac->pmt(priv->hw, 0);
4391 spin_unlock_irqrestore(&priv->lock, flags);
4392 priv->irq_wake = 0;
4393 } else {
4394 pinctrl_pm_select_default_state(priv->device);
4395 /* enable the clk previously disabled */
4396 clk_enable(priv->plat->stmmac_clk);
4397 clk_enable(priv->plat->pclk);
4398 /* reset the phy so that it's ready */
4399 if (priv->mii)
4400 stmmac_mdio_reset(priv->mii);
4401 }
4402
4403 netif_device_attach(ndev);
4404
4405 spin_lock_irqsave(&priv->lock, flags);
4406
4407 stmmac_reset_queues_param(priv);
4408
4409 	/* Reset the private MSS value to force the MSS context to be
4410 	 * reprogrammed on the next TSO xmit (only used for GMAC4).
4411 	 */
4412 priv->mss = 0;
4413
4414 stmmac_clear_descriptors(priv);
4415
4416 stmmac_hw_setup(ndev, false);
4417 stmmac_init_tx_coalesce(priv);
4418 stmmac_set_rx_mode(ndev);
4419
4420 stmmac_enable_all_queues(priv);
4421
4422 stmmac_start_all_queues(priv);
4423
4424 spin_unlock_irqrestore(&priv->lock, flags);
4425
4426 if (ndev->phydev)
4427 phy_start(ndev->phydev);
4428
4429 return 0;
4430 }
4431 EXPORT_SYMBOL_GPL(stmmac_resume);
4432
4433 #ifndef MODULE
4434 static int __init stmmac_cmdline_opt(char *str)
4435 {
4436 char *opt;
4437
4438 if (!str || !*str)
4439 return -EINVAL;
4440 while ((opt = strsep(&str, ",")) != NULL) {
4441 if (!strncmp(opt, "debug:", 6)) {
4442 if (kstrtoint(opt + 6, 0, &debug))
4443 goto err;
4444 } else if (!strncmp(opt, "phyaddr:", 8)) {
4445 if (kstrtoint(opt + 8, 0, &phyaddr))
4446 goto err;
4447 } else if (!strncmp(opt, "buf_sz:", 7)) {
4448 if (kstrtoint(opt + 7, 0, &buf_sz))
4449 goto err;
4450 } else if (!strncmp(opt, "tc:", 3)) {
4451 if (kstrtoint(opt + 3, 0, &tc))
4452 goto err;
4453 } else if (!strncmp(opt, "watchdog:", 9)) {
4454 if (kstrtoint(opt + 9, 0, &watchdog))
4455 goto err;
4456 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4457 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4458 goto err;
4459 } else if (!strncmp(opt, "pause:", 6)) {
4460 if (kstrtoint(opt + 6, 0, &pause))
4461 goto err;
4462 } else if (!strncmp(opt, "eee_timer:", 10)) {
4463 if (kstrtoint(opt + 10, 0, &eee_timer))
4464 goto err;
4465 } else if (!strncmp(opt, "chain_mode:", 11)) {
4466 if (kstrtoint(opt + 11, 0, &chain_mode))
4467 goto err;
4468 }
4469 }
4470 return 0;
4471
4472 err:
4473 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4474 return -EINVAL;
4475 }
4476
4477 __setup("stmmaceth=", stmmac_cmdline_opt);
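/* Illustrative boot-time usage of the options parsed above, e.g.:
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:2000
 * Each "name:value" pair overrides the corresponding module parameter.
 */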
4478 #endif /* MODULE */
4479
4480 static int __init stmmac_init(void)
4481 {
4482 #ifdef CONFIG_DEBUG_FS
4483 /* Create debugfs main directory if it doesn't exist yet */
4484 if (!stmmac_fs_dir) {
4485 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4486
4487 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4488 pr_err("ERROR %s, debugfs create directory failed\n",
4489 STMMAC_RESOURCE_NAME);
4490
4491 return -ENOMEM;
4492 }
4493 }
4494 #endif
4495
4496 return 0;
4497 }
4498
4499 static void __exit stmmac_exit(void)
4500 {
4501 #ifdef CONFIG_DEBUG_FS
4502 debugfs_remove_recursive(stmmac_fs_dir);
4503 #endif
4504 }
4505
4506 module_init(stmmac_init)
4507 module_exit(stmmac_exit)
4508
4509 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4510 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4511 MODULE_LICENSE("GPL");