net: mvneta: driver for Marvell Armada 370/XP network unit
drivers/net/ethernet/marvell/mvneta.c
1/*
2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/version.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/platform_device.h>
19#include <linux/skbuff.h>
20#include <linux/inetdevice.h>
21#include <linux/mbus.h>
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <net/ip.h>
25#include <net/ipv6.h>
26#include <linux/of.h>
27#include <linux/of_irq.h>
28#include <linux/of_mdio.h>
29#include <linux/of_net.h>
30#include <linux/of_address.h>
31#include <linux/phy.h>
32
33/* Registers */
34#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
35#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
36#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
37#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
38#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
39#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
40#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
41#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
42#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
43#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
44#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
45#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
46#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
47#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
48#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
49#define MVNETA_PORT_RX_RESET 0x1cc0
50#define MVNETA_PORT_RX_DMA_RESET BIT(0)
51#define MVNETA_PHY_ADDR 0x2000
52#define MVNETA_PHY_ADDR_MASK 0x1f
53#define MVNETA_MBUS_RETRY 0x2010
54#define MVNETA_UNIT_INTR_CAUSE 0x2080
55#define MVNETA_UNIT_CONTROL 0x20B0
56#define MVNETA_PHY_POLLING_ENABLE BIT(1)
57#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
58#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
59#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
60#define MVNETA_BASE_ADDR_ENABLE 0x2290
61#define MVNETA_PORT_CONFIG 0x2400
62#define MVNETA_UNI_PROMISC_MODE BIT(0)
63#define MVNETA_DEF_RXQ(q) ((q) << 1)
64#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
65#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
66#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
67#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
68#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
69#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
70#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
71 MVNETA_DEF_RXQ_ARP(q) | \
72 MVNETA_DEF_RXQ_TCP(q) | \
73 MVNETA_DEF_RXQ_UDP(q) | \
74 MVNETA_DEF_RXQ_BPDU(q) | \
75 MVNETA_TX_UNSET_ERR_SUM | \
76 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
77#define MVNETA_PORT_CONFIG_EXTEND 0x2404
78#define MVNETA_MAC_ADDR_LOW 0x2414
79#define MVNETA_MAC_ADDR_HIGH 0x2418
80#define MVNETA_SDMA_CONFIG 0x241c
81#define MVNETA_SDMA_BRST_SIZE_16 4
82#define MVNETA_NO_DESC_SWAP 0x0
83#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
84#define MVNETA_RX_NO_DATA_SWAP BIT(4)
85#define MVNETA_TX_NO_DATA_SWAP BIT(5)
86#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
87#define MVNETA_PORT_STATUS 0x2444
88#define MVNETA_TX_IN_PRGRS BIT(1)
89#define MVNETA_TX_FIFO_EMPTY BIT(8)
90#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
91#define MVNETA_TYPE_PRIO 0x24bc
92#define MVNETA_FORCE_UNI BIT(21)
93#define MVNETA_TXQ_CMD_1 0x24e4
94#define MVNETA_TXQ_CMD 0x2448
95#define MVNETA_TXQ_DISABLE_SHIFT 8
96#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
97#define MVNETA_ACC_MODE 0x2500
98#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
99#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
100#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
101#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
102#define MVNETA_INTR_NEW_CAUSE 0x25a0
103#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
104#define MVNETA_INTR_NEW_MASK 0x25a4
105#define MVNETA_INTR_OLD_CAUSE 0x25a8
106#define MVNETA_INTR_OLD_MASK 0x25ac
107#define MVNETA_INTR_MISC_CAUSE 0x25b0
108#define MVNETA_INTR_MISC_MASK 0x25b4
109#define MVNETA_INTR_ENABLE 0x25b8
110#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
111#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
112#define MVNETA_RXQ_CMD 0x2680
113#define MVNETA_RXQ_DISABLE_SHIFT 8
114#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
115#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
116#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
117#define MVNETA_GMAC_CTRL_0 0x2c00
118#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
119#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
120#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
121#define MVNETA_GMAC_CTRL_2 0x2c08
122#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
123#define MVNETA_GMAC2_PORT_RGMII BIT(4)
124#define MVNETA_GMAC2_PORT_RESET BIT(6)
125#define MVNETA_GMAC_STATUS 0x2c10
126#define MVNETA_GMAC_LINK_UP BIT(0)
127#define MVNETA_GMAC_SPEED_1000 BIT(1)
128#define MVNETA_GMAC_SPEED_100 BIT(2)
129#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
130#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
131#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
132#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
133#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
134#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
135#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
136#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
137#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
138#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
139#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
140#define MVNETA_MIB_COUNTERS_BASE 0x3080
141#define MVNETA_MIB_LATE_COLLISION 0x7c
142#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
143#define MVNETA_DA_FILT_OTH_MCAST 0x3500
144#define MVNETA_DA_FILT_UCAST_BASE 0x3600
145#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
146#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
147#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
148#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
149#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
150#define MVNETA_TXQ_DEC_SENT_SHIFT 16
151#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
152#define MVNETA_TXQ_SENT_DESC_SHIFT 16
153#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
154#define MVNETA_PORT_TX_RESET 0x3cf0
155#define MVNETA_PORT_TX_DMA_RESET BIT(0)
156#define MVNETA_TX_MTU 0x3e0c
157#define MVNETA_TX_TOKEN_SIZE 0x3e14
158#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
159#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
160#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
161
162#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
163
164/* Descriptor ring Macros */
165#define MVNETA_QUEUE_NEXT_DESC(q, index) \
166 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
167
168/* Various constants */
169
170/* Coalescing */
171#define MVNETA_TXDONE_COAL_PKTS 16
172#define MVNETA_RX_COAL_PKTS 32
173#define MVNETA_RX_COAL_USEC 100
174
175/* Timer */
176#define MVNETA_TX_DONE_TIMER_PERIOD 10
177
178/* Napi polling weight */
179#define MVNETA_RX_POLL_WEIGHT 64
180
181/*
 182 * The two-byte Marvell header. It either contains a special value used
 183 * by Marvell switches when a specific hardware mode is enabled (not
 184 * supported by this driver) or is automatically filled with zeroes on
 185 * the RX side. Since those two bytes sit at the front of the Ethernet
 186 * header, they allow the IP header to be aligned on a 4-byte boundary
 187 * automatically: the hardware skips those two bytes on its
 188 * own.
189 */
190#define MVNETA_MH_SIZE 2
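/* For illustration: with the two-byte Marvell header prepended, the 14-byte
 * Ethernet header starts at offset 2, so the IP header begins at offset
 * 2 + 14 = 16 and is therefore 4-byte aligned without any extra padding.
 */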
191
192#define MVNETA_VLAN_TAG_LEN 4
193
194#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
195#define MVNETA_TX_CSUM_MAX_SIZE 9800
196#define MVNETA_ACC_MODE_EXT 1
197
198/* Timeout constants */
199#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
200#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
201#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
202
203#define MVNETA_TX_MTU_MAX 0x3ffff
204
205/* Max number of Rx descriptors */
206#define MVNETA_MAX_RXD 128
207
208/* Max number of Tx descriptors */
209#define MVNETA_MAX_TXD 532
210
211/* descriptor aligned size */
212#define MVNETA_DESC_ALIGNED_SIZE 32
213
214#define MVNETA_RX_PKT_SIZE(mtu) \
215 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
216 ETH_HLEN + ETH_FCS_LEN, \
217 MVNETA_CPU_D_CACHE_LINE_SIZE)
218
219#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
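/* Worked example: for a standard 1500-byte MTU,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536,
 * and the buffer mapped for DMA is MVNETA_RX_BUF_SIZE(1536) =
 * 1536 + NET_SKB_PAD bytes.
 */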
220
221struct mvneta_stats {
222 struct u64_stats_sync syncp;
223 u64 packets;
224 u64 bytes;
225};
226
227struct mvneta_port {
228 int pkt_size;
229 void __iomem *base;
230 struct mvneta_rx_queue *rxqs;
231 struct mvneta_tx_queue *txqs;
232 struct timer_list tx_done_timer;
233 struct net_device *dev;
234
235 u32 cause_rx_tx;
236 struct napi_struct napi;
237
238 /* Flags */
239 unsigned long flags;
240#define MVNETA_F_TX_DONE_TIMER_BIT 0
241
242 /* Napi weight */
243 int weight;
244
245 /* Core clock */
246 unsigned int clk_rate_hz;
247 u8 mcast_count[256];
248 u16 tx_ring_size;
249 u16 rx_ring_size;
250 struct mvneta_stats tx_stats;
251 struct mvneta_stats rx_stats;
252
253 struct mii_bus *mii_bus;
254 struct phy_device *phy_dev;
255 phy_interface_t phy_interface;
256 struct device_node *phy_node;
257 unsigned int link;
258 unsigned int duplex;
259 unsigned int speed;
260};
261
262/*
263 * The mvneta_tx_desc and mvneta_rx_desc structures describe the
264 * layout of the transmit and reception DMA descriptors, and their
265 * layout is therefore defined by the hardware design
266 */
267struct mvneta_tx_desc {
268 u32 command; /* Options used by HW for packet transmitting.*/
269#define MVNETA_TX_L3_OFF_SHIFT 0
270#define MVNETA_TX_IP_HLEN_SHIFT 8
271#define MVNETA_TX_L4_UDP BIT(16)
272#define MVNETA_TX_L3_IP6 BIT(17)
273#define MVNETA_TXD_IP_CSUM BIT(18)
274#define MVNETA_TXD_Z_PAD BIT(19)
275#define MVNETA_TXD_L_DESC BIT(20)
276#define MVNETA_TXD_F_DESC BIT(21)
277#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
278 MVNETA_TXD_L_DESC | \
279 MVNETA_TXD_F_DESC)
280#define MVNETA_TX_L4_CSUM_FULL BIT(30)
281#define MVNETA_TX_L4_CSUM_NOT BIT(31)
282
283 u16 reserverd1; /* csum_l4 (for future use) */
284 u16 data_size; /* Data size of transmitted packet in bytes */
285 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
286 u32 reserved2; /* hw_cmd - (for future use, PMT) */
287 u32 reserved3[4]; /* Reserved - (for future use) */
288};
289
290struct mvneta_rx_desc {
291 u32 status; /* Info about received packet */
292#define MVNETA_RXD_ERR_CRC 0x0
293#define MVNETA_RXD_ERR_SUMMARY BIT(16)
294#define MVNETA_RXD_ERR_OVERRUN BIT(17)
295#define MVNETA_RXD_ERR_LEN BIT(18)
296#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
297#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
298#define MVNETA_RXD_L3_IP4 BIT(25)
299#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
300#define MVNETA_RXD_L4_CSUM_OK BIT(30)
301
302 u16 reserved1; /* pnc_info - (for future use, PnC) */
303 u16 data_size; /* Size of received packet in bytes */
304 u32 buf_phys_addr; /* Physical address of the buffer */
305 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
306 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
307 u16 reserved3; /* prefetch_cmd, for future use */
308 u16 reserved4; /* csum_l4 - (for future use, PnC) */
309 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
310 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
311};
312
313struct mvneta_tx_queue {
314 /* Number of this TX queue, in the range 0-7 */
315 u8 id;
316
317 /* Number of TX DMA descriptors in the descriptor ring */
318 int size;
319
320 /* Number of currently used TX DMA descriptor in the
321 * descriptor ring */
322 int count;
323
324 /* Array of transmitted skb */
325 struct sk_buff **tx_skb;
326
327 /* Index of last TX DMA descriptor that was inserted */
328 int txq_put_index;
329
330 /* Index of the TX DMA descriptor to be cleaned up */
331 int txq_get_index;
332
333 u32 done_pkts_coal;
334
335 /* Virtual address of the TX DMA descriptors array */
336 struct mvneta_tx_desc *descs;
337
338 /* DMA address of the TX DMA descriptors array */
339 dma_addr_t descs_phys;
340
341 /* Index of the last TX DMA descriptor */
342 int last_desc;
343
344 /* Index of the next TX DMA descriptor to process */
345 int next_desc_to_proc;
346};
347
348struct mvneta_rx_queue {
349 /* rx queue number, in the range 0-7 */
350 u8 id;
351
352 /* num of rx descriptors in the rx descriptor ring */
353 int size;
354
355 /* counter of times when mvneta_refill() failed */
356 int missed;
357
358 u32 pkts_coal;
359 u32 time_coal;
360
361 /* Virtual address of the RX DMA descriptors array */
362 struct mvneta_rx_desc *descs;
363
364 /* DMA address of the RX DMA descriptors array */
365 dma_addr_t descs_phys;
366
367 /* Index of the last RX DMA descriptor */
368 int last_desc;
369
370 /* Index of the next RX DMA descriptor to process */
371 int next_desc_to_proc;
372};
373
374static int rxq_number = 8;
375static int txq_number = 8;
376
377static int rxq_def;
378static int txq_def;
379
380#define MVNETA_DRIVER_NAME "mvneta"
381#define MVNETA_DRIVER_VERSION "1.0"
382
383/* Utility/helper methods */
384
385/* Write helper method */
386static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
387{
388 writel(data, pp->base + offset);
389}
390
391/* Read helper method */
392static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
393{
394 return readl(pp->base + offset);
395}
396
397/* Increment txq get counter */
398static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
399{
400 txq->txq_get_index++;
401 if (txq->txq_get_index == txq->size)
402 txq->txq_get_index = 0;
403}
404
405/* Increment txq put counter */
406static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
407{
408 txq->txq_put_index++;
409 if (txq->txq_put_index == txq->size)
410 txq->txq_put_index = 0;
411}
412
413
414/* Clear all MIB counters */
415static void mvneta_mib_counters_clear(struct mvneta_port *pp)
416{
417 int i;
418 u32 dummy;
419
420 /* Perform dummy reads from MIB counters */
421 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
422 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
423}
424
425/* Get System Network Statistics */
426struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
427 struct rtnl_link_stats64 *stats)
428{
429 struct mvneta_port *pp = netdev_priv(dev);
430 unsigned int start;
431
432 memset(stats, 0, sizeof(struct rtnl_link_stats64));
433
434 do {
435 start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
436 stats->rx_packets = pp->rx_stats.packets;
437 stats->rx_bytes = pp->rx_stats.bytes;
438 } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
439
440
441 do {
442 start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
443 stats->tx_packets = pp->tx_stats.packets;
444 stats->tx_bytes = pp->tx_stats.bytes;
445 } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
446
447 stats->rx_errors = dev->stats.rx_errors;
448 stats->rx_dropped = dev->stats.rx_dropped;
449
450 stats->tx_dropped = dev->stats.tx_dropped;
451
452 return stats;
453}
454
455/* Rx descriptors helper methods */
456
457/*
458 * Checks whether the given RX descriptor is both the first and the
459 * last descriptor for the RX packet. Each RX packet is currently
 460 * received through a single RX descriptor, so an RX descriptor
 461 * without both its first and last bits set is an error.
462 */
463static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
464{
465 return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
466 MVNETA_RXD_FIRST_LAST_DESC;
467}
468
469/* Add number of descriptors ready to receive new packets */
470static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
471 struct mvneta_rx_queue *rxq,
472 int ndescs)
473{
474 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
475 * be added at once */
476 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
477 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
478 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
479 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
480 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
481 }
482
483 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
484 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
485}
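/* Example: refilling 300 descriptors results in two writes to the status
 * update register: one adding 255 non-occupied descriptors, then a final
 * write adding the remaining 45.
 */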
486
487/* Get number of RX descriptors occupied by received packets */
488static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
489 struct mvneta_rx_queue *rxq)
490{
491 u32 val;
492
493 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
494 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
495}
496
497/*
498 * Update num of rx desc called upon return from rx path or
499 * from mvneta_rxq_drop_pkts().
500 */
501static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
502 struct mvneta_rx_queue *rxq,
503 int rx_done, int rx_filled)
504{
505 u32 val;
506
507 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
508 val = rx_done |
509 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
510 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
511 return;
512 }
513
514 /* Only 255 descriptors can be added at once */
515 while ((rx_done > 0) || (rx_filled > 0)) {
516 if (rx_done <= 0xff) {
517 val = rx_done;
518 rx_done = 0;
519 } else {
520 val = 0xff;
521 rx_done -= 0xff;
522 }
523 if (rx_filled <= 0xff) {
524 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
525 rx_filled = 0;
526 } else {
527 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
528 rx_filled -= 0xff;
529 }
530 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
531 }
532}
533
534/* Get pointer to next RX descriptor to be processed by SW */
535static struct mvneta_rx_desc *
536mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
537{
538 int rx_desc = rxq->next_desc_to_proc;
539
540 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
541 return rxq->descs + rx_desc;
542}
543
544/* Change maximum receive size of the port. */
545static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
546{
547 u32 val;
548
549 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
550 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
551 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
552 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
553 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
554}
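/* The hardware field holds (max_rx_size - MVNETA_MH_SIZE) / 2, i.e. the
 * limit in 2-byte units excluding the Marvell header; for example, a
 * 1536-byte max_rx_size is programmed as (1536 - 2) / 2 = 767.
 */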
555
556
557/* Set rx queue offset */
558static void mvneta_rxq_offset_set(struct mvneta_port *pp,
559 struct mvneta_rx_queue *rxq,
560 int offset)
561{
562 u32 val;
563
564 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
565 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
566
 567 /* Offset is programmed in units of 8 bytes */
568 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
569 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
570}
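/* Example: mvneta_rxq_init() passes NET_SKB_PAD as the offset; assuming a
 * platform where NET_SKB_PAD is 64 bytes, the field is programmed as
 * 64 >> 3 = 8, since the hardware counts the offset in 8-byte units.
 */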
571
572
573/* Tx descriptors helper methods */
574
575/* Update HW with number of TX descriptors to be sent */
576static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
577 struct mvneta_tx_queue *txq,
578 int pend_desc)
579{
580 u32 val;
581
 582 /* Only 255 descriptors can be added at once; assume the caller
 583 processes TX descriptors in quanta of less than 256 */
584 val = pend_desc;
585 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
586}
587
588/* Get pointer to next TX descriptor to be processed (send) by HW */
589static struct mvneta_tx_desc *
590mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
591{
592 int tx_desc = txq->next_desc_to_proc;
593
594 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
595 return txq->descs + tx_desc;
596}
597
598/* Release the last allocated TX descriptor. Useful to handle DMA
599 * mapping failures in the TX path. */
600static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
601{
602 if (txq->next_desc_to_proc == 0)
603 txq->next_desc_to_proc = txq->last_desc - 1;
604 else
605 txq->next_desc_to_proc--;
606}
607
608/* Set rxq buf size */
609static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
610 struct mvneta_rx_queue *rxq,
611 int buf_size)
612{
613 u32 val;
614
615 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
616
617 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
618 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
619
620 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
621}
622
623/* Disable buffer management (BM) */
624static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
625 struct mvneta_rx_queue *rxq)
626{
627 u32 val;
628
629 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
630 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
631 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
632}
633
634
635
636/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
637static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
638{
639 u32 val;
640
641 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
642
643 if (enable)
644 val |= MVNETA_GMAC2_PORT_RGMII;
645 else
646 val &= ~MVNETA_GMAC2_PORT_RGMII;
647
648 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
649}
650
651/* Config SGMII port */
652static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
653{
654 u32 val;
655
656 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
657 val |= MVNETA_GMAC2_PSC_ENABLE;
658 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
659}
660
661/* Start the Ethernet port RX and TX activity */
662static void mvneta_port_up(struct mvneta_port *pp)
663{
664 int queue;
665 u32 q_map;
666
667 /* Enable all initialized TXs. */
668 mvneta_mib_counters_clear(pp);
669 q_map = 0;
670 for (queue = 0; queue < txq_number; queue++) {
671 struct mvneta_tx_queue *txq = &pp->txqs[queue];
672 if (txq->descs != NULL)
673 q_map |= (1 << queue);
674 }
675 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
676
677 /* Enable all initialized RXQs. */
678 q_map = 0;
679 for (queue = 0; queue < rxq_number; queue++) {
680 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
681 if (rxq->descs != NULL)
682 q_map |= (1 << queue);
683 }
684
685 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
686}
687
688/* Stop the Ethernet port activity */
689static void mvneta_port_down(struct mvneta_port *pp)
690{
691 u32 val;
692 int count;
693
694 /* Stop Rx port activity. Check port Rx activity. */
695 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
696
697 /* Issue stop command for active channels only */
698 if (val != 0)
699 mvreg_write(pp, MVNETA_RXQ_CMD,
700 val << MVNETA_RXQ_DISABLE_SHIFT);
701
702 /* Wait for all Rx activity to terminate. */
703 count = 0;
704 do {
705 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
706 netdev_warn(pp->dev,
707 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
708 val);
709 break;
710 }
711 mdelay(1);
712
713 val = mvreg_read(pp, MVNETA_RXQ_CMD);
714 } while (val & 0xff);
715
716 /* Stop Tx port activity. Check port Tx activity. Issue stop
717 command for active channels only */
718 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
719
720 if (val != 0)
721 mvreg_write(pp, MVNETA_TXQ_CMD,
722 (val << MVNETA_TXQ_DISABLE_SHIFT));
723
724 /* Wait for all Tx activity to terminate. */
725 count = 0;
726 do {
727 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
728 netdev_warn(pp->dev,
729 "TIMEOUT for TX stopped status=0x%08x\n",
730 val);
731 break;
732 }
733 mdelay(1);
734
735 /* Check TX Command reg that all Txqs are stopped */
736 val = mvreg_read(pp, MVNETA_TXQ_CMD);
737
738 } while (val & 0xff);
739
740 /* Double check to verify that TX FIFO is empty */
741 count = 0;
742 do {
743 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
744 netdev_warn(pp->dev,
745 "TX FIFO empty timeout status=0x08%x\n",
746 val);
747 break;
748 }
749 mdelay(1);
750
751 val = mvreg_read(pp, MVNETA_PORT_STATUS);
752 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
753 (val & MVNETA_TX_IN_PRGRS));
754
755 udelay(200);
756}
757
758/* Enable the port by setting the port enable bit of the MAC control register */
759static void mvneta_port_enable(struct mvneta_port *pp)
760{
761 u32 val;
762
763 /* Enable port */
764 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
765 val |= MVNETA_GMAC0_PORT_ENABLE;
766 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
767}
768
769/* Disable the port and wait for about 200 usec before returning */
770static void mvneta_port_disable(struct mvneta_port *pp)
771{
772 u32 val;
773
774 /* Reset the Enable bit in the Serial Control Register */
775 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
776 val &= ~MVNETA_GMAC0_PORT_ENABLE;
777 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
778
779 udelay(200);
780}
781
782/* Multicast tables methods */
783
784/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
785static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
786{
787 int offset;
788 u32 val;
789
790 if (queue == -1) {
791 val = 0;
792 } else {
793 val = 0x1 | (queue << 1);
794 val |= (val << 24) | (val << 16) | (val << 8);
795 }
796
797 for (offset = 0; offset <= 0xc; offset += 4)
798 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
799}
800
801/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
802static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
803{
804 int offset;
805 u32 val;
806
807 if (queue == -1) {
808 val = 0;
809 } else {
810 val = 0x1 | (queue << 1);
811 val |= (val << 24) | (val << 16) | (val << 8);
812 }
813
814 for (offset = 0; offset <= 0xfc; offset += 4)
815 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
816
817}
818
819/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
820static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
821{
822 int offset;
823 u32 val;
824
825 if (queue == -1) {
826 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
827 val = 0;
828 } else {
829 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
830 val = 0x1 | (queue << 1);
831 val |= (val << 24) | (val << 16) | (val << 8);
832 }
833
834 for (offset = 0; offset <= 0xfc; offset += 4)
835 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
836}
837
838/* This method sets defaults to the NETA port:
839 * Clears interrupt Cause and Mask registers.
840 * Clears all MAC tables.
841 * Sets defaults to all registers.
842 * Resets RX and TX descriptor rings.
843 * Resets PHY.
844 * This method can be called after mvneta_port_down() to return the port
845 * settings to defaults.
846 */
847static void mvneta_defaults_set(struct mvneta_port *pp)
848{
849 int cpu;
850 int queue;
851 u32 val;
852
853 /* Clear all Cause registers */
854 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
855 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
856 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
857
858 /* Mask all interrupts */
859 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
860 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
861 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
862 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
863
864 /* Enable MBUS Retry bit16 */
865 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
866
867 /* Set CPU queue access map - all CPUs have access to all RX
868 queues and to all TX queues */
869 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
870 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
871 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
872 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
873
874 /* Reset RX and TX DMAs */
875 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
876 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
877
878 /* Disable Legacy WRR, Disable EJP, Release from reset */
879 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
880 for (queue = 0; queue < txq_number; queue++) {
881 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
882 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
883 }
884
885 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
886 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
887
888 /* Set Port Acceleration Mode */
889 val = MVNETA_ACC_MODE_EXT;
890 mvreg_write(pp, MVNETA_ACC_MODE, val);
891
 892 /* Update val of portCfg register according to all RxQueue types */
893 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
894 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
895
896 val = 0;
897 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
898 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
899
900 /* Build PORT_SDMA_CONFIG_REG */
901 val = 0;
902
903 /* Default burst size */
904 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
905 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
906
907 val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
908 MVNETA_NO_DESC_SWAP);
909
910 /* Assign port SDMA configuration */
911 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
912
913 mvneta_set_ucast_table(pp, -1);
914 mvneta_set_special_mcast_table(pp, -1);
915 mvneta_set_other_mcast_table(pp, -1);
916
917 /* Set port interrupt enable register - default enable all */
918 mvreg_write(pp, MVNETA_INTR_ENABLE,
919 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
920 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
921}
922
923/* Set max sizes for tx queues */
924static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
925
926{
927 u32 val, size, mtu;
928 int queue;
929
930 mtu = max_tx_size * 8;
931 if (mtu > MVNETA_TX_MTU_MAX)
932 mtu = MVNETA_TX_MTU_MAX;
933
934 /* Set MTU */
935 val = mvreg_read(pp, MVNETA_TX_MTU);
936 val &= ~MVNETA_TX_MTU_MAX;
937 val |= mtu;
938 mvreg_write(pp, MVNETA_TX_MTU, val);
939
 940 /* TX token size and all TXQs token size must be larger than the MTU */
941 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
942
943 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
944 if (size < mtu) {
945 size = mtu;
946 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
947 val |= size;
948 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
949 }
950 for (queue = 0; queue < txq_number; queue++) {
951 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
952
953 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
954 if (size < mtu) {
955 size = mtu;
956 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
957 val |= size;
958 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
959 }
960 }
961}
962
963/* Set unicast address */
964static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
965 int queue)
966{
967 unsigned int unicast_reg;
968 unsigned int tbl_offset;
969 unsigned int reg_offset;
970
971 /* Locate the Unicast table entry */
972 last_nibble = (0xf & last_nibble);
973
974 /* offset from unicast tbl base */
975 tbl_offset = (last_nibble / 4) * 4;
976
977 /* offset within the above reg */
978 reg_offset = last_nibble % 4;
979
980 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
981
982 if (queue == -1) {
983 /* Clear accepts frame bit at specified unicast DA tbl entry */
984 unicast_reg &= ~(0xff << (8 * reg_offset));
985 } else {
986 unicast_reg &= ~(0xff << (8 * reg_offset));
987 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
988 }
989
990 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
991}
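/* Example: for a unicast address ending in 0x4b, last_nibble is 0xb, so
 * tbl_offset = (11 / 4) * 4 = 8 (the third 32-bit filter register) and
 * reg_offset = 11 % 4 = 3 (the most significant byte of that register).
 */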
992
993/* Set mac address */
994static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
995 int queue)
996{
997 unsigned int mac_h;
998 unsigned int mac_l;
999
1000 if (queue != -1) {
1001 mac_l = (addr[4] << 8) | (addr[5]);
1002 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1003 (addr[2] << 8) | (addr[3] << 0);
1004
1005 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1006 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1007 }
1008
1009 /* Accept frames of this address */
1010 mvneta_set_ucast_addr(pp, addr[5], queue);
1011}
1012
1013/*
1014 * Set the number of packets that will be received before
1015 * an RX interrupt is generated by the HW.
1016 */
1017static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1018 struct mvneta_rx_queue *rxq, u32 value)
1019{
1020 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1021 value | MVNETA_RXQ_NON_OCCUPIED(0));
1022 rxq->pkts_coal = value;
1023}
1024
1025/*
1026 * Set the time delay in usec before
1027 * an RX interrupt is generated by the HW.
1028 */
1029static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1030 struct mvneta_rx_queue *rxq, u32 value)
1031{
1032 u32 val = (pp->clk_rate_hz / 1000000) * value;
1033
1034 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1035 rxq->time_coal = value;
1036}
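/* Example: the register is programmed in core-clock cycles. Assuming a
 * 250 MHz core clock (clk_rate_hz = 250000000), a 100 usec delay is
 * written as (250000000 / 1000000) * 100 = 25000 cycles.
 */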
1037
1038/* Set threshold for TX_DONE pkts coalescing */
1039static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1040 struct mvneta_tx_queue *txq, u32 value)
1041{
1042 u32 val;
1043
1044 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1045
1046 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1047 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1048
1049 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1050
1051 txq->done_pkts_coal = value;
1052}
1053
1054/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
1055static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
1056{
1057 if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
1058 pp->tx_done_timer.expires = jiffies +
1059 msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
1060 add_timer(&pp->tx_done_timer);
1061 }
1062}
1063
1064
1065/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1066static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1067 u32 phys_addr, u32 cookie)
1068{
1069 rx_desc->buf_cookie = cookie;
1070 rx_desc->buf_phys_addr = phys_addr;
1071}
1072
1073/* Decrement sent descriptors counter */
1074static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1075 struct mvneta_tx_queue *txq,
1076 int sent_desc)
1077{
1078 u32 val;
1079
1080 /* Only 255 TX descriptors can be updated at once */
1081 while (sent_desc > 0xff) {
1082 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1083 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1084 sent_desc = sent_desc - 0xff;
1085 }
1086
1087 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1088 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1089}
1090
1091/* Get number of TX descriptors already sent by HW */
1092static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1093 struct mvneta_tx_queue *txq)
1094{
1095 u32 val;
1096 int sent_desc;
1097
1098 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1099 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1100 MVNETA_TXQ_SENT_DESC_SHIFT;
1101
1102 return sent_desc;
1103}
1104
1105/*
1106 * Get number of sent descriptors and decrement counter.
1107 * The number of sent descriptors is returned.
1108 */
1109static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1110 struct mvneta_tx_queue *txq)
1111{
1112 int sent_desc;
1113
1114 /* Get number of sent descriptors */
1115 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1116
1117 /* Decrement sent descriptors counter */
1118 if (sent_desc)
1119 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1120
1121 return sent_desc;
1122}
1123
1124/* Set TXQ descriptors fields relevant for CSUM calculation */
1125static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1126 int ip_hdr_len, int l4_proto)
1127{
1128 u32 command;
1129
1130 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1131 G_L4_chk, L4_type; required only for checksum
1132 calculation */
1133 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1134 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1135
1136 if (l3_proto == swab16(ETH_P_IP))
1137 command |= MVNETA_TXD_IP_CSUM;
1138 else
1139 command |= MVNETA_TX_L3_IP6;
1140
1141 if (l4_proto == IPPROTO_TCP)
1142 command |= MVNETA_TX_L4_CSUM_FULL;
1143 else if (l4_proto == IPPROTO_UDP)
1144 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1145 else
1146 command |= MVNETA_TX_L4_CSUM_NOT;
1147
1148 return command;
1149}
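/* Example with hypothetical values: for TCP over IPv4 in a plain Ethernet
 * frame, the caller passes l3_offs = 14, ip_hdr_len = 5 (IHL in 32-bit
 * words) and l4_proto = IPPROTO_TCP, so the returned command is
 * 14 | (5 << MVNETA_TX_IP_HLEN_SHIFT) | MVNETA_TXD_IP_CSUM |
 * MVNETA_TX_L4_CSUM_FULL.
 */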
1150
1151
1152/* Display more error info */
1153static void mvneta_rx_error(struct mvneta_port *pp,
1154 struct mvneta_rx_desc *rx_desc)
1155{
1156 u32 status = rx_desc->status;
1157
1158 if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
1159 netdev_err(pp->dev,
1160 "bad rx status %08x (buffer oversize), size=%d\n",
1161 rx_desc->status, rx_desc->data_size);
1162 return;
1163 }
1164
1165 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1166 case MVNETA_RXD_ERR_CRC:
1167 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1168 status, rx_desc->data_size);
1169 break;
1170 case MVNETA_RXD_ERR_OVERRUN:
1171 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1172 status, rx_desc->data_size);
1173 break;
1174 case MVNETA_RXD_ERR_LEN:
1175 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1176 status, rx_desc->data_size);
1177 break;
1178 case MVNETA_RXD_ERR_RESOURCE:
1179 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1180 status, rx_desc->data_size);
1181 break;
1182 }
1183}
1184
1185/* Handle RX checksum offload */
1186static void mvneta_rx_csum(struct mvneta_port *pp,
1187 struct mvneta_rx_desc *rx_desc,
1188 struct sk_buff *skb)
1189{
1190 if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
1191 (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
1192 skb->csum = 0;
1193 skb->ip_summed = CHECKSUM_UNNECESSARY;
1194 return;
1195 }
1196
1197 skb->ip_summed = CHECKSUM_NONE;
1198}
1199
1200/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
1201static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1202 u32 cause)
1203{
1204 int queue = fls(cause) - 1;
1205
1206 return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
1207}
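/* Example: if cause is 0x14 (TX queues 2 and 4 pending), fls(0x14) - 1 = 4,
 * so queue 4 is handled first; the caller clears bit 4 and calls this
 * again to pick up queue 2.
 */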
1208
1209/* Free tx queue skbuffs */
1210static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1211 struct mvneta_tx_queue *txq, int num)
1212{
1213 int i;
1214
1215 for (i = 0; i < num; i++) {
1216 struct mvneta_tx_desc *tx_desc = txq->descs +
1217 txq->txq_get_index;
1218 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1219
1220 mvneta_txq_inc_get(txq);
1221
1222 if (!skb)
1223 continue;
1224
1225 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1226 tx_desc->data_size, DMA_TO_DEVICE);
1227 dev_kfree_skb_any(skb);
1228 }
1229}
1230
1231/* Handle end of transmission */
1232static int mvneta_txq_done(struct mvneta_port *pp,
1233 struct mvneta_tx_queue *txq)
1234{
1235 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1236 int tx_done;
1237
1238 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1239 if (tx_done == 0)
1240 return tx_done;
1241 mvneta_txq_bufs_free(pp, txq, tx_done);
1242
1243 txq->count -= tx_done;
1244
1245 if (netif_tx_queue_stopped(nq)) {
1246 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
1247 netif_tx_wake_queue(nq);
1248 }
1249
1250 return tx_done;
1251}
1252
1253/* Refill processing */
1254static int mvneta_rx_refill(struct mvneta_port *pp,
1255 struct mvneta_rx_desc *rx_desc)
1256
1257{
1258 dma_addr_t phys_addr;
1259 struct sk_buff *skb;
1260
1261 skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
1262 if (!skb)
1263 return -ENOMEM;
1264
1265 phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
1266 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1267 DMA_FROM_DEVICE);
1268 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1269 dev_kfree_skb(skb);
1270 return -ENOMEM;
1271 }
1272
1273 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1274
1275 return 0;
1276}
1277
1278/* Handle tx checksum */
1279static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1280{
1281 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1282 int ip_hdr_len = 0;
1283 u8 l4_proto;
1284
1285 if (skb->protocol == htons(ETH_P_IP)) {
1286 struct iphdr *ip4h = ip_hdr(skb);
1287
1288 /* Calculate IPv4 checksum and L4 checksum */
1289 ip_hdr_len = ip4h->ihl;
1290 l4_proto = ip4h->protocol;
1291 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1292 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1293
1294 /* Read l4_protocol from one of IPv6 extra headers */
1295 if (skb_network_header_len(skb) > 0)
1296 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1297 l4_proto = ip6h->nexthdr;
1298 } else
1299 return MVNETA_TX_L4_CSUM_NOT;
1300
1301 return mvneta_txq_desc_csum(skb_network_offset(skb),
1302 skb->protocol, ip_hdr_len, l4_proto);
1303 }
1304
1305 return MVNETA_TX_L4_CSUM_NOT;
1306}
1307
1308/*
1309 * Returns rx queue pointer (find last set bit) according to causeRxTx
1310 * value
1311 */
1312static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1313 u32 cause)
1314{
1315 int queue = fls(cause >> 8) - 1;
1316
1317 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1318}
1319
1320/* Drop packets received by the RXQ and free buffers */
1321static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1322 struct mvneta_rx_queue *rxq)
1323{
1324 int rx_done, i;
1325
1326 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1327 for (i = 0; i < rxq->size; i++) {
1328 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1329 struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
1330
1331 dev_kfree_skb_any(skb);
1332 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1333 rx_desc->data_size, DMA_FROM_DEVICE);
1334 }
1335
1336 if (rx_done)
1337 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1338}
1339
1340/* Main rx processing */
1341static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1342 struct mvneta_rx_queue *rxq)
1343{
1344 struct net_device *dev = pp->dev;
1345 int rx_done, rx_filled;
1346
1347 /* Get number of received packets */
1348 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1349
1350 if (rx_todo > rx_done)
1351 rx_todo = rx_done;
1352
1353 rx_done = 0;
1354 rx_filled = 0;
1355
1356 /* Fairness NAPI loop */
1357 while (rx_done < rx_todo) {
1358 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1359 struct sk_buff *skb;
1360 u32 rx_status;
1361 int rx_bytes, err;
1362
1363 prefetch(rx_desc);
1364 rx_done++;
1365 rx_filled++;
1366 rx_status = rx_desc->status;
1367 skb = (struct sk_buff *)rx_desc->buf_cookie;
1368
1369 if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
1370 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1371 dev->stats.rx_errors++;
1372 mvneta_rx_error(pp, rx_desc);
1373 mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
1374 (u32)skb);
1375 continue;
1376 }
1377
1378 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1379 rx_desc->data_size, DMA_FROM_DEVICE);
1380
1381 rx_bytes = rx_desc->data_size -
1382 (ETH_FCS_LEN + MVNETA_MH_SIZE);
1383 u64_stats_update_begin(&pp->rx_stats.syncp);
1384 pp->rx_stats.packets++;
1385 pp->rx_stats.bytes += rx_bytes;
1386 u64_stats_update_end(&pp->rx_stats.syncp);
1387
1388 /* Linux processing */
1389 skb_reserve(skb, MVNETA_MH_SIZE);
1390 skb_put(skb, rx_bytes);
1391
1392 skb->protocol = eth_type_trans(skb, dev);
1393
1394 mvneta_rx_csum(pp, rx_desc, skb);
1395
1396 napi_gro_receive(&pp->napi, skb);
1397
1398 /* Refill processing */
1399 err = mvneta_rx_refill(pp, rx_desc);
1400 if (err) {
1401 netdev_err(pp->dev, "Linux processing - Can't refill\n");
1402 rxq->missed++;
1403 rx_filled--;
1404 }
1405 }
1406
1407 /* Update rxq management counters */
1408 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1409
1410 return rx_done;
1411}
1412
1413/* Handle tx fragmentation processing */
1414static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1415 struct mvneta_tx_queue *txq)
1416{
1417 struct mvneta_tx_desc *tx_desc;
1418 int i;
1419
1420 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1421 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1422 void *addr = page_address(frag->page.p) + frag->page_offset;
1423
1424 tx_desc = mvneta_txq_next_desc_get(txq);
1425 tx_desc->data_size = frag->size;
1426
1427 tx_desc->buf_phys_addr =
1428 dma_map_single(pp->dev->dev.parent, addr,
1429 tx_desc->data_size, DMA_TO_DEVICE);
1430
1431 if (dma_mapping_error(pp->dev->dev.parent,
1432 tx_desc->buf_phys_addr)) {
1433 mvneta_txq_desc_put(txq);
1434 goto error;
1435 }
1436
1437 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
1438 /* Last descriptor */
1439 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1440
1441 txq->tx_skb[txq->txq_put_index] = skb;
1442
1443 mvneta_txq_inc_put(txq);
1444 } else {
1445 /* Descriptor in the middle: Not First, Not Last */
1446 tx_desc->command = 0;
1447
1448 txq->tx_skb[txq->txq_put_index] = NULL;
1449 mvneta_txq_inc_put(txq);
1450 }
1451 }
1452
1453 return 0;
1454
1455error:
1456 /* Release all descriptors that were used to map fragments of
1457 * this packet, as well as the corresponding DMA mappings */
1458 for (i = i - 1; i >= 0; i--) {
1459 tx_desc = txq->descs + i;
1460 dma_unmap_single(pp->dev->dev.parent,
1461 tx_desc->buf_phys_addr,
1462 tx_desc->data_size,
1463 DMA_TO_DEVICE);
1464 mvneta_txq_desc_put(txq);
1465 }
1466
1467 return -ENOMEM;
1468}
1469
1470/* Main tx processing */
1471static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1472{
1473 struct mvneta_port *pp = netdev_priv(dev);
1474 struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
1475 struct mvneta_tx_desc *tx_desc;
1476 struct netdev_queue *nq;
1477 int frags = 0;
1478 u32 tx_cmd;
1479
1480 if (!netif_running(dev))
1481 goto out;
1482
1483 frags = skb_shinfo(skb)->nr_frags + 1;
1484 nq = netdev_get_tx_queue(dev, txq_def);
1485
1486 /* Get a descriptor for the first part of the packet */
1487 tx_desc = mvneta_txq_next_desc_get(txq);
1488
1489 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1490
1491 tx_desc->data_size = skb_headlen(skb);
1492
1493 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1494 tx_desc->data_size,
1495 DMA_TO_DEVICE);
1496 if (unlikely(dma_mapping_error(dev->dev.parent,
1497 tx_desc->buf_phys_addr))) {
1498 mvneta_txq_desc_put(txq);
1499 frags = 0;
1500 goto out;
1501 }
1502
1503 if (frags == 1) {
1504 /* First and Last descriptor */
1505 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1506 tx_desc->command = tx_cmd;
1507 txq->tx_skb[txq->txq_put_index] = skb;
1508 mvneta_txq_inc_put(txq);
1509 } else {
1510 /* First but not Last */
1511 tx_cmd |= MVNETA_TXD_F_DESC;
1512 txq->tx_skb[txq->txq_put_index] = NULL;
1513 mvneta_txq_inc_put(txq);
1514 tx_desc->command = tx_cmd;
1515 /* Continue with other skb fragments */
1516 if (mvneta_tx_frag_process(pp, skb, txq)) {
1517 dma_unmap_single(dev->dev.parent,
1518 tx_desc->buf_phys_addr,
1519 tx_desc->data_size,
1520 DMA_TO_DEVICE);
1521 mvneta_txq_desc_put(txq);
1522 frags = 0;
1523 goto out;
1524 }
1525 }
1526
1527 txq->count += frags;
1528 mvneta_txq_pend_desc_add(pp, txq, frags);
1529
1530 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1531 netif_tx_stop_queue(nq);
1532
1533out:
1534 if (frags > 0) {
1535 u64_stats_update_begin(&pp->tx_stats.syncp);
1536 pp->tx_stats.packets++;
1537 pp->tx_stats.bytes += skb->len;
1538 u64_stats_update_end(&pp->tx_stats.syncp);
1539
1540 } else {
1541 dev->stats.tx_dropped++;
1542 dev_kfree_skb_any(skb);
1543 }
1544
1545 if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
1546 mvneta_txq_done(pp, txq);
1547
1548 /* If after calling mvneta_txq_done, count equals
1549 frags, we need to set the timer */
1550 if (txq->count == frags && frags > 0)
1551 mvneta_add_tx_done_timer(pp);
1552
1553 return NETDEV_TX_OK;
1554}
1555
1556
1557/* Free tx resources, when resetting a port */
1558static void mvneta_txq_done_force(struct mvneta_port *pp,
1559 struct mvneta_tx_queue *txq)
1560
1561{
1562 int tx_done = txq->count;
1563
1564 mvneta_txq_bufs_free(pp, txq, tx_done);
1565
1566 /* reset txq */
1567 txq->count = 0;
1568 txq->txq_put_index = 0;
1569 txq->txq_get_index = 0;
1570}
1571
1572/* handle tx done - called from tx done timer callback */
1573static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
1574 int *tx_todo)
1575{
1576 struct mvneta_tx_queue *txq;
1577 u32 tx_done = 0;
1578 struct netdev_queue *nq;
1579
1580 *tx_todo = 0;
1581 while (cause_tx_done != 0) {
1582 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1583 if (!txq)
1584 break;
1585
1586 nq = netdev_get_tx_queue(pp->dev, txq->id);
1587 __netif_tx_lock(nq, smp_processor_id());
1588
1589 if (txq->count) {
1590 tx_done += mvneta_txq_done(pp, txq);
1591 *tx_todo += txq->count;
1592 }
1593
1594 __netif_tx_unlock(nq);
1595 cause_tx_done &= ~((1 << txq->id));
1596 }
1597
1598 return tx_done;
1599}
1600
1601/*
1602 * Compute the crc8 of the specified address, using a dedicated algorithm
1603 * defined by the hw spec that differs from the generic crc8 algorithm
1604 */
1605static int mvneta_addr_crc(unsigned char *addr)
1606{
1607 int crc = 0;
1608 int i;
1609
1610 for (i = 0; i < ETH_ALEN; i++) {
1611 int j;
1612
1613 crc = (crc ^ addr[i]) << 8;
1614 for (j = 7; j >= 0; j--) {
1615 if (crc & (0x100 << j))
1616 crc ^= 0x107 << j;
1617 }
1618 }
1619
1620 return crc;
1621}
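/* The 0x107 constant above is the CRC-8 generator polynomial
 * x^8 + x^2 + x + 1, applied MSB-first with a zero initial value and no
 * final XOR.
 */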
1622
1623/* This method controls the net device special MAC multicast support.
1624 * The Special Multicast Table for MAC addresses supports MAC of the form
1625 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1626 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1627 * Table entries in the DA-Filter table. This method sets the
1628 * appropriate Special Multicast Table entry.
1629 */
1630static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1631 unsigned char last_byte,
1632 int queue)
1633{
1634 unsigned int smc_table_reg;
1635 unsigned int tbl_offset;
1636 unsigned int reg_offset;
1637
1638 /* Register offset from SMC table base */
1639 tbl_offset = (last_byte / 4);
1640 /* Entry offset within the above reg */
1641 reg_offset = last_byte % 4;
1642
1643 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1644 + tbl_offset * 4));
1645
1646 if (queue == -1)
1647 smc_table_reg &= ~(0xff << (8 * reg_offset));
1648 else {
1649 smc_table_reg &= ~(0xff << (8 * reg_offset));
1650 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1651 }
1652
1653 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1654 smc_table_reg);
1655}
1656
1657/* This method controls the network device Other MAC multicast support.
1658 * The Other Multicast Table is used for multicast of another type.
1659 * A CRC-8 is used as an index to the Other Multicast Table entries
1660 * in the DA-Filter table.
1661 * The method gets the CRC-8 value from the calling routine and
1662 * sets the appropriate Other Multicast Table entry according to the
1663 * specified CRC-8.
1664 */
1665static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1666 unsigned char crc8,
1667 int queue)
1668{
1669 unsigned int omc_table_reg;
1670 unsigned int tbl_offset;
1671 unsigned int reg_offset;
1672
1673 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1674 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1675
1676 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1677
1678 if (queue == -1) {
1679 /* Clear accepts frame bit at specified Other DA table entry */
1680 omc_table_reg &= ~(0xff << (8 * reg_offset));
1681 } else {
1682 omc_table_reg &= ~(0xff << (8 * reg_offset));
1683 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1684 }
1685
1686 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1687}
1688
1689/* The network device supports multicast using two tables:
1690 * 1) Special Multicast Table for MAC addresses of the form
1691 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1692 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1693 * Table entries in the DA-Filter table.
1694 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1695 * is used as an index to the Other Multicast Table entries in the
1696 * DA-Filter table.
1697 */
1698static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1699 int queue)
1700{
1701 unsigned char crc_result = 0;
1702
1703 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1704 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1705 return 0;
1706 }
1707
1708 crc_result = mvneta_addr_crc(p_addr);
1709 if (queue == -1) {
1710 if (pp->mcast_count[crc_result] == 0) {
1711 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1712 crc_result);
1713 return -EINVAL;
1714 }
1715
1716 pp->mcast_count[crc_result]--;
1717 if (pp->mcast_count[crc_result] != 0) {
1718 netdev_info(pp->dev,
1719 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1720 pp->mcast_count[crc_result], crc_result);
1721 return -EINVAL;
1722 }
1723 } else
1724 pp->mcast_count[crc_result]++;
1725
1726 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1727
1728 return 0;
1729}
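/* Example: the all-hosts group 224.0.0.1 maps to 01:00:5e:00:00:01, which
 * matches the special-multicast prefix and lands in special table entry
 * 0x01, while 239.1.1.1 (01:00:5e:01:01:01) falls through to the CRC-8
 * indexed Other Multicast table.
 */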
1730
1731/* Configure the filtering mode of the Ethernet port */
1732static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1733 int is_promisc)
1734{
1735 u32 port_cfg_reg, val;
1736
1737 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1738
1739 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1740
1741 /* Set / Clear UPM bit in port configuration register */
1742 if (is_promisc) {
1743 /* Accept all Unicast addresses */
1744 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1745 val |= MVNETA_FORCE_UNI;
1746 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1747 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1748 } else {
1749 /* Reject all Unicast addresses */
1750 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1751 val &= ~MVNETA_FORCE_UNI;
1752 }
1753
1754 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1755 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1756}
1757
1758/* register unicast and multicast addresses */
1759static void mvneta_set_rx_mode(struct net_device *dev)
1760{
1761 struct mvneta_port *pp = netdev_priv(dev);
1762 struct netdev_hw_addr *ha;
1763
1764 if (dev->flags & IFF_PROMISC) {
1765 /* Accept all: Multicast + Unicast */
1766 mvneta_rx_unicast_promisc_set(pp, 1);
1767 mvneta_set_ucast_table(pp, rxq_def);
1768 mvneta_set_special_mcast_table(pp, rxq_def);
1769 mvneta_set_other_mcast_table(pp, rxq_def);
1770 } else {
1771 /* Accept single Unicast */
1772 mvneta_rx_unicast_promisc_set(pp, 0);
1773 mvneta_set_ucast_table(pp, -1);
1774 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1775
1776 if (dev->flags & IFF_ALLMULTI) {
1777 /* Accept all multicast */
1778 mvneta_set_special_mcast_table(pp, rxq_def);
1779 mvneta_set_other_mcast_table(pp, rxq_def);
1780 } else {
1781 /* Accept only initialized multicast */
1782 mvneta_set_special_mcast_table(pp, -1);
1783 mvneta_set_other_mcast_table(pp, -1);
1784
1785 if (!netdev_mc_empty(dev)) {
1786 netdev_for_each_mc_addr(ha, dev) {
1787 mvneta_mcast_addr_set(pp, ha->addr,
1788 rxq_def);
1789 }
1790 }
1791 }
1792 }
1793}
1794
1795/* Interrupt handling - the callback for request_irq() */
1796static irqreturn_t mvneta_isr(int irq, void *dev_id)
1797{
1798 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1799
1800 /* Mask all interrupts */
1801 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1802
1803 napi_schedule(&pp->napi);
1804
1805 return IRQ_HANDLED;
1806}
1807
1808/* NAPI handler
1809 * Bits 0 - 7 of the causeRxTx register indicate that packets were
1810 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1811 * Bits 8 - 15 of the causeRxTx register indicate that packets were
1812 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1813 * Each CPU has its own causeRxTx register
1814 */
1815static int mvneta_poll(struct napi_struct *napi, int budget)
1816{
1817 int rx_done = 0;
1818 u32 cause_rx_tx;
1819 unsigned long flags;
1820 struct mvneta_port *pp = netdev_priv(napi->dev);
1821
1822 if (!netif_running(pp->dev)) {
1823 napi_complete(napi);
1824 return rx_done;
1825 }
1826
1827 /* Read cause register */
1828 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1829 MVNETA_RX_INTR_MASK(rxq_number);
1830
1831 /*
1832 * For the case where the last mvneta_poll did not process all
1833 * RX packets
1834 */
1835 cause_rx_tx |= pp->cause_rx_tx;
1836 if (rxq_number > 1) {
1837 while ((cause_rx_tx != 0) && (budget > 0)) {
1838 int count;
1839 struct mvneta_rx_queue *rxq;
1840 /* get rx queue number from cause_rx_tx */
1841 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1842 if (!rxq)
1843 break;
1844
1845 /* process the packet in that rx queue */
1846 count = mvneta_rx(pp, budget, rxq);
1847 rx_done += count;
1848 budget -= count;
1849 if (budget > 0) {
1850 /* clear the bit of this rx queue in the
1851 cause rx tx register, so that the next
1852 iteration will find the next rx queue
1853 with received packets to process */
1854 cause_rx_tx &= ~((1 << rxq->id) << 8);
1855 }
1856 }
1857 } else {
1858 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1859 budget -= rx_done;
1860 }
1861
1862 if (budget > 0) {
1863 cause_rx_tx = 0;
1864 napi_complete(napi);
1865 local_irq_save(flags);
1866 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1867 MVNETA_RX_INTR_MASK(rxq_number));
1868 local_irq_restore(flags);
1869 }
1870
1871 pp->cause_rx_tx = cause_rx_tx;
1872 return rx_done;
1873}
1874
1875/* tx done timer callback */
1876static void mvneta_tx_done_timer_callback(unsigned long data)
1877{
1878 struct net_device *dev = (struct net_device *)data;
1879 struct mvneta_port *pp = netdev_priv(dev);
1880 int tx_done = 0, tx_todo = 0;
1881
1882 if (!netif_running(dev))
1883 return;
1884
1885 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1886
1887 tx_done = mvneta_tx_done_gbe(pp,
1888 (((1 << txq_number) - 1) &
1889 MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1890 &tx_todo);
1891 if (tx_todo > 0)
1892 mvneta_add_tx_done_timer(pp);
1893}
1894
1895/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1896static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1897 int num)
1898{
1899 struct net_device *dev = pp->dev;
1900 int i;
1901
1902 for (i = 0; i < num; i++) {
1903 struct sk_buff *skb;
1904 struct mvneta_rx_desc *rx_desc;
1905 unsigned long phys_addr;
1906
1907 skb = dev_alloc_skb(pp->pkt_size);
1908 if (!skb) {
1909 netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
1910 __func__, rxq->id, i, num);
1911 break;
1912 }
1913
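		/* Map the whole receive buffer for the controller and store
		 * both the DMA address and the skb pointer (as the descriptor
		 * cookie) so the buffer can be recovered on receive.
		 */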
1914 rx_desc = rxq->descs + i;
1915 memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
1916 phys_addr = dma_map_single(dev->dev.parent, skb->head,
1917 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1918 DMA_FROM_DEVICE);
1919 if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
1920 dev_kfree_skb(skb);
1921 break;
1922 }
1923
1924 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1925 }
1926
1927	/* Add this number of RX descriptors as non-occupied (ready to
1928	 * get packets) */
1929 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1930
1931 return i;
1932}
1933
1934/* Free all packets pending transmit from all TXQs and reset TX port */
1935static void mvneta_tx_reset(struct mvneta_port *pp)
1936{
1937 int queue;
1938
1939	/* Free the skbs still held in each TX ring */
1940 for (queue = 0; queue < txq_number; queue++)
1941 mvneta_txq_done_force(pp, &pp->txqs[queue]);
1942
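	/* Toggle the TX DMA reset bit: assert the reset, then release it */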
1943 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1944 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1945}
1946
1947static void mvneta_rx_reset(struct mvneta_port *pp)
1948{
1949 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1950 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1951}
1952
1953/* Rx/Tx queue initialization/cleanup methods */
1954
1955/* Create a specified RX queue */
1956static int mvneta_rxq_init(struct mvneta_port *pp,
1957 struct mvneta_rx_queue *rxq)
1959{
1960 rxq->size = pp->rx_ring_size;
1961
1962 /* Allocate memory for RX descriptors */
1963 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
1964 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1965 &rxq->descs_phys, GFP_KERNEL);
1966 if (rxq->descs == NULL) {
1967 netdev_err(pp->dev,
1968 "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
1969 rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1970 rxq->size);
1971 return -ENOMEM;
1972 }
1973
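	/* The descriptor ring must be aligned to the CPU D-cache line size;
	 * dma_alloc_coherent() should already return suitably aligned memory,
	 * so this check is only a safety net.
	 */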
1974 BUG_ON(rxq->descs !=
1975 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
1976
1977 rxq->last_desc = rxq->size - 1;
1978
1979 /* Set Rx descriptors queue starting address */
1980 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1981 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1982
1983 /* Set Offset */
1984 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
1985
1986 /* Set coalescing pkts and time */
1987 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
1988 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
1989
1990 /* Fill RXQ with buffers from RX pool */
1991 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
1992 mvneta_rxq_bm_disable(pp, rxq);
1993 mvneta_rxq_fill(pp, rxq, rxq->size);
1994
1995 return 0;
1996}
1997
1998/* Cleanup Rx queue */
1999static void mvneta_rxq_deinit(struct mvneta_port *pp,
2000 struct mvneta_rx_queue *rxq)
2001{
2002 mvneta_rxq_drop_pkts(pp, rxq);
2003
2004 if (rxq->descs)
2005 dma_free_coherent(pp->dev->dev.parent,
2006 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2007 rxq->descs,
2008 rxq->descs_phys);
2009
2010 rxq->descs = NULL;
2011 rxq->last_desc = 0;
2012 rxq->next_desc_to_proc = 0;
2013 rxq->descs_phys = 0;
2014}
2015
2016/* Create and initialize a tx queue */
2017static int mvneta_txq_init(struct mvneta_port *pp,
2018 struct mvneta_tx_queue *txq)
2019{
2020 txq->size = pp->tx_ring_size;
2021
2022 /* Allocate memory for TX descriptors */
2023 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2024 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2025 &txq->descs_phys, GFP_KERNEL);
2026 if (txq->descs == NULL) {
2027 netdev_err(pp->dev,
2028 "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
2029 txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
2030 txq->size);
2031 return -ENOMEM;
2032 }
2033
2034 /* Make sure descriptor address is cache line size aligned */
2035 BUG_ON(txq->descs !=
2036 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2037
2038 txq->last_desc = txq->size - 1;
2039
2040 /* Set maximum bandwidth for enabled TXQs */
2041 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2042 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2043
2044 /* Set Tx descriptors queue starting address */
2045 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2046 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2047
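	/* One slot per descriptor, used to remember the skb attached to each
	 * descriptor so that it can be freed once transmission completes.
	 */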
2048 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2049 if (txq->tx_skb == NULL) {
2050 dma_free_coherent(pp->dev->dev.parent,
2051 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2052 txq->descs, txq->descs_phys);
2053 return -ENOMEM;
2054 }
2055 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2056
2057 return 0;
2058}
2059
2060/* Free all resources allocated for a TX queue */
2061static void mvneta_txq_deinit(struct mvneta_port *pp,
2062 struct mvneta_tx_queue *txq)
2063{
2064 kfree(txq->tx_skb);
2065
2066 if (txq->descs)
2067 dma_free_coherent(pp->dev->dev.parent,
2068 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2069 txq->descs, txq->descs_phys);
2070
2071 txq->descs = NULL;
2072 txq->last_desc = 0;
2073 txq->next_desc_to_proc = 0;
2074 txq->descs_phys = 0;
2075
2076 /* Set minimum bandwidth for disabled TXQs */
2077 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2078 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2079
2080 /* Set Tx descriptors queue starting address and size */
2081 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2082 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2083}
2084
2085/* Cleanup all Tx queues */
2086static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2087{
2088 int queue;
2089
2090 for (queue = 0; queue < txq_number; queue++)
2091 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2092}
2093
2094/* Cleanup all Rx queues */
2095static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2096{
2097 int queue;
2098
2099 for (queue = 0; queue < rxq_number; queue++)
2100 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2101}
2102
2103
2104/* Init all Rx queues */
2105static int mvneta_setup_rxqs(struct mvneta_port *pp)
2106{
2107 int queue;
2108
2109 for (queue = 0; queue < rxq_number; queue++) {
2110 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2111 if (err) {
2112 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2113 __func__, queue);
2114 mvneta_cleanup_rxqs(pp);
2115 return err;
2116 }
2117 }
2118
2119 return 0;
2120}
2121
2122/* Init all tx queues */
2123static int mvneta_setup_txqs(struct mvneta_port *pp)
2124{
2125 int queue;
2126
2127 for (queue = 0; queue < txq_number; queue++) {
2128 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2129 if (err) {
2130 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2131 __func__, queue);
2132 mvneta_cleanup_txqs(pp);
2133 return err;
2134 }
2135 }
2136
2137 return 0;
2138}
2139
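/* Bring the port up: enable RX/TX activity and NAPI, unmask RX interrupts,
 * then start the PHY and the TX queues. Called from open() and again after
 * reconfiguration such as an MTU change or a TX timeout recovery.
 */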
2140static void mvneta_start_dev(struct mvneta_port *pp)
2141{
2142 mvneta_max_rx_size_set(pp, pp->pkt_size);
2143 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2144
2145 /* start the Rx/Tx activity */
2146 mvneta_port_enable(pp);
2147
2148 /* Enable polling on the port */
2149 napi_enable(&pp->napi);
2150
2151 /* Unmask interrupts */
2152 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2153 MVNETA_RX_INTR_MASK(rxq_number));
2154
2155 phy_start(pp->phy_dev);
2156 netif_tx_start_all_queues(pp->dev);
2157}
2158
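/* Quiesce the port: stop the PHY and NAPI, drain the port, mask and clear
 * all port interrupts, then reset the TX and RX paths.
 */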
2159static void mvneta_stop_dev(struct mvneta_port *pp)
2160{
2161 phy_stop(pp->phy_dev);
2162
2163 napi_disable(&pp->napi);
2164
2165 netif_carrier_off(pp->dev);
2166
2167 mvneta_port_down(pp);
2168 netif_tx_stop_all_queues(pp->dev);
2169
2170 /* Stop the port activity */
2171 mvneta_port_disable(pp);
2172
2173 /* Clear all ethernet port interrupts */
2174 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2175 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2176
2177 /* Mask all ethernet port interrupts */
2178 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2179 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2180 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2181
2182 mvneta_tx_reset(pp);
2183 mvneta_rx_reset(pp);
2184}
2185
2186/* tx timeout callback - display a message and stop/start the network device */
2187static void mvneta_tx_timeout(struct net_device *dev)
2188{
2189 struct mvneta_port *pp = netdev_priv(dev);
2190
2191 netdev_info(dev, "tx timeout\n");
2192 mvneta_stop_dev(pp);
2193 mvneta_start_dev(pp);
2194}
2195
2196/* Return a valid (possibly adjusted) MTU, or -EINVAL if the MTU is too small */
2197static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2198{
2199 if (mtu < 68) {
2200 netdev_err(dev, "cannot change mtu to less than 68\n");
2201 return -EINVAL;
2202 }
2203
2204 /* 9676 == 9700 - 20 and rounding to 8 */
2205 if (mtu > 9676) {
2206		netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2207 mtu = 9676;
2208 }
2209
2210 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2211 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2212 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2213 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2214 }
2215
2216 return mtu;
2217}
2218
2219/* Change the device mtu */
2220static int mvneta_change_mtu(struct net_device *dev, int mtu)
2221{
2222 struct mvneta_port *pp = netdev_priv(dev);
2223 int ret;
2224
2225 mtu = mvneta_check_mtu_valid(dev, mtu);
2226 if (mtu < 0)
2227 return -EINVAL;
2228
2229 dev->mtu = mtu;
2230
2231 if (!netif_running(dev))
2232 return 0;
2233
2234 /*
2235 * The interface is running, so we have to force a
2236 * reallocation of the RXQs
2237 */
2238 mvneta_stop_dev(pp);
2239
2240 mvneta_cleanup_txqs(pp);
2241 mvneta_cleanup_rxqs(pp);
2242
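	/* The receive buffer size depends on the MTU, so recompute it before
	 * the RX queues are re-created below.
	 */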
2243 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2244
2245 ret = mvneta_setup_rxqs(pp);
2246 if (ret) {
2247 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2248 return ret;
2249 }
2250
2251 mvneta_setup_txqs(pp);
2252
2253 mvneta_start_dev(pp);
2254 mvneta_port_up(pp);
2255
2256 return 0;
2257}
2258
2259/* Handle setting mac address */
2260static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2261{
2262 struct mvneta_port *pp = netdev_priv(dev);
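	/* addr points to a struct sockaddr: skip the 2-byte sa_family field
	 * to reach the new MAC address stored in sa_data.
	 */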
2263 u8 *mac = addr + 2;
2264 int i;
2265
2266 if (netif_running(dev))
2267 return -EBUSY;
2268
2269 /* Remove previous address table entry */
2270 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2271
2272 /* Set new addr in hw */
2273 mvneta_mac_addr_set(pp, mac, rxq_def);
2274
2275 /* Set addr in the device */
2276 for (i = 0; i < ETH_ALEN; i++)
2277 dev->dev_addr[i] = mac[i];
2278
2279 return 0;
2280}
2281
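/* Link change callback, registered via of_phy_connect() in
 * mvneta_mdio_probe(): mirror the negotiated speed and duplex into the GMAC
 * configuration and force the link up or down to match the PHY state.
 */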
2282static void mvneta_adjust_link(struct net_device *ndev)
2283{
2284 struct mvneta_port *pp = netdev_priv(ndev);
2285 struct phy_device *phydev = pp->phy_dev;
2286 int status_change = 0;
2287
2288 if (phydev->link) {
2289 if ((pp->speed != phydev->speed) ||
2290 (pp->duplex != phydev->duplex)) {
2291 u32 val;
2292
2293 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2294 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2295 MVNETA_GMAC_CONFIG_GMII_SPEED |
2296 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2297
2298 if (phydev->duplex)
2299 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2300
2301 if (phydev->speed == SPEED_1000)
2302 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2303 else
2304 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2305
2306 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2307
2308 pp->duplex = phydev->duplex;
2309 pp->speed = phydev->speed;
2310 }
2311 }
2312
2313 if (phydev->link != pp->link) {
2314 if (!phydev->link) {
2315 pp->duplex = -1;
2316 pp->speed = 0;
2317 }
2318
2319 pp->link = phydev->link;
2320 status_change = 1;
2321 }
2322
2323 if (status_change) {
2324 if (phydev->link) {
2325 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2326 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2327 MVNETA_GMAC_FORCE_LINK_DOWN);
2328 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2329 mvneta_port_up(pp);
2330 netdev_info(pp->dev, "link up\n");
2331 } else {
2332 mvneta_port_down(pp);
2333 netdev_info(pp->dev, "link down\n");
2334 }
2335 }
2336}
2337
2338static int mvneta_mdio_probe(struct mvneta_port *pp)
2339{
2340 struct phy_device *phy_dev;
2341
2342 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2343 pp->phy_interface);
2344 if (!phy_dev) {
2345 netdev_err(pp->dev, "could not find the PHY\n");
2346 return -ENODEV;
2347 }
2348
2349 phy_dev->supported &= PHY_GBIT_FEATURES;
2350 phy_dev->advertising = phy_dev->supported;
2351
2352 pp->phy_dev = phy_dev;
2353 pp->link = 0;
2354 pp->duplex = 0;
2355 pp->speed = 0;
2356
2357 return 0;
2358}
2359
2360static void mvneta_mdio_remove(struct mvneta_port *pp)
2361{
2362 phy_disconnect(pp->phy_dev);
2363 pp->phy_dev = NULL;
2364}
2365
2366static int mvneta_open(struct net_device *dev)
2367{
2368 struct mvneta_port *pp = netdev_priv(dev);
2369 int ret;
2370
2371 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2372
2373 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2374
2375 ret = mvneta_setup_rxqs(pp);
2376 if (ret)
2377 return ret;
2378
2379 ret = mvneta_setup_txqs(pp);
2380 if (ret)
2381 goto err_cleanup_rxqs;
2382
2383 /* Connect to port interrupt line */
2384 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2385 MVNETA_DRIVER_NAME, pp);
2386 if (ret) {
2387 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2388 goto err_cleanup_txqs;
2389 }
2390
2391	/* By default, the link is down */
2392 netif_carrier_off(pp->dev);
2393
2394 ret = mvneta_mdio_probe(pp);
2395 if (ret < 0) {
2396 netdev_err(dev, "cannot probe MDIO bus\n");
2397 goto err_free_irq;
2398 }
2399
2400 mvneta_start_dev(pp);
2401
2402 return 0;
2403
2404err_free_irq:
2405 free_irq(pp->dev->irq, pp);
2406err_cleanup_txqs:
2407 mvneta_cleanup_txqs(pp);
2408err_cleanup_rxqs:
2409 mvneta_cleanup_rxqs(pp);
2410 return ret;
2411}
2412
2413/* Stop the port, free port interrupt line */
2414static int mvneta_stop(struct net_device *dev)
2415{
2416 struct mvneta_port *pp = netdev_priv(dev);
2417
2418 mvneta_stop_dev(pp);
2419 mvneta_mdio_remove(pp);
2420 free_irq(dev->irq, pp);
2421 mvneta_cleanup_rxqs(pp);
2422 mvneta_cleanup_txqs(pp);
2423 del_timer(&pp->tx_done_timer);
2424 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2425
2426 return 0;
2427}
2428
2429/* Ethtool methods */
2430
2431/* Get settings (phy address, speed) for ethtools */
2432int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2433{
2434 struct mvneta_port *pp = netdev_priv(dev);
2435
2436 if (!pp->phy_dev)
2437 return -ENODEV;
2438
2439 return phy_ethtool_gset(pp->phy_dev, cmd);
2440}
2441
2442/* Set settings (phy address, speed) for ethtools */
2443int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2444{
2445 struct mvneta_port *pp = netdev_priv(dev);
2446
2447 if (!pp->phy_dev)
2448 return -ENODEV;
2449
2450 return phy_ethtool_sset(pp->phy_dev, cmd);
2451}
2452
2453/* Set interrupt coalescing for ethtools */
2454static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2455 struct ethtool_coalesce *c)
2456{
2457 struct mvneta_port *pp = netdev_priv(dev);
2458 int queue;
2459
2460 for (queue = 0; queue < rxq_number; queue++) {
2461 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2462 rxq->time_coal = c->rx_coalesce_usecs;
2463 rxq->pkts_coal = c->rx_max_coalesced_frames;
2464 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2465 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2466 }
2467
2468 for (queue = 0; queue < txq_number; queue++) {
2469 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2470 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2471 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2472 }
2473
2474 return 0;
2475}
2476
2477/* get coalescing for ethtools */
2478static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2479 struct ethtool_coalesce *c)
2480{
2481 struct mvneta_port *pp = netdev_priv(dev);
2482
2483 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2484 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2485
2486 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2487 return 0;
2488}
2489
2490
2491static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2492 struct ethtool_drvinfo *drvinfo)
2493{
2494 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2495 sizeof(drvinfo->driver));
2496 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2497 sizeof(drvinfo->version));
2498 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2499 sizeof(drvinfo->bus_info));
2500}
2501
2502
2503static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2504 struct ethtool_ringparam *ring)
2505{
2506 struct mvneta_port *pp = netdev_priv(netdev);
2507
2508 ring->rx_max_pending = MVNETA_MAX_RXD;
2509 ring->tx_max_pending = MVNETA_MAX_TXD;
2510 ring->rx_pending = pp->rx_ring_size;
2511 ring->tx_pending = pp->tx_ring_size;
2512}
2513
2514static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2515 struct ethtool_ringparam *ring)
2516{
2517 struct mvneta_port *pp = netdev_priv(dev);
2518
2519 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2520 return -EINVAL;
2521 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2522 ring->rx_pending : MVNETA_MAX_RXD;
2523 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2524 ring->tx_pending : MVNETA_MAX_TXD;
2525
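	/* If the interface is running, restart it so that the queues are
	 * re-created with the new ring sizes.
	 */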
2526 if (netif_running(dev)) {
2527 mvneta_stop(dev);
2528 if (mvneta_open(dev)) {
2529 netdev_err(dev,
2530 "error on opening device after ring param change\n");
2531 return -ENOMEM;
2532 }
2533 }
2534
2535 return 0;
2536}
2537
2538static const struct net_device_ops mvneta_netdev_ops = {
2539 .ndo_open = mvneta_open,
2540 .ndo_stop = mvneta_stop,
2541 .ndo_start_xmit = mvneta_tx,
2542 .ndo_set_rx_mode = mvneta_set_rx_mode,
2543 .ndo_set_mac_address = mvneta_set_mac_addr,
2544 .ndo_change_mtu = mvneta_change_mtu,
2545 .ndo_tx_timeout = mvneta_tx_timeout,
2546 .ndo_get_stats64 = mvneta_get_stats64,
2547};
2548
2549const struct ethtool_ops mvneta_eth_tool_ops = {
2550 .get_link = ethtool_op_get_link,
2551 .get_settings = mvneta_ethtool_get_settings,
2552 .set_settings = mvneta_ethtool_set_settings,
2553 .set_coalesce = mvneta_ethtool_set_coalesce,
2554 .get_coalesce = mvneta_ethtool_get_coalesce,
2555 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2556 .get_ringparam = mvneta_ethtool_get_ringparam,
2557 .set_ringparam = mvneta_ethtool_set_ringparam,
2558};
2559
2560/* Initialize hw */
2561static int __devinit mvneta_init(struct mvneta_port *pp, int phy_addr)
2562{
2563 int queue;
2564
2565 /* Disable port */
2566 mvneta_port_disable(pp);
2567
2568 /* Set port default values */
2569 mvneta_defaults_set(pp);
2570
2571 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2572 GFP_KERNEL);
2573 if (!pp->txqs)
2574 return -ENOMEM;
2575
2576 /* Initialize TX descriptor rings */
2577 for (queue = 0; queue < txq_number; queue++) {
2578 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2579 txq->id = queue;
2580 txq->size = pp->tx_ring_size;
2581 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2582 }
2583
2584 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2585 GFP_KERNEL);
2586 if (!pp->rxqs) {
2587 kfree(pp->txqs);
2588 return -ENOMEM;
2589 }
2590
2591 /* Create Rx descriptor rings */
2592 for (queue = 0; queue < rxq_number; queue++) {
2593 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2594 rxq->id = queue;
2595 rxq->size = pp->rx_ring_size;
2596 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2597 rxq->time_coal = MVNETA_RX_COAL_USEC;
2598 }
2599
2600 return 0;
2601}
2602
2603static void __devexit mvneta_deinit(struct mvneta_port *pp)
2604{
2605 kfree(pp->txqs);
2606 kfree(pp->rxqs);
2607}
2608
2609/* platform glue : initialize decoding windows */
2610static void __devinit
2611mvneta_conf_mbus_windows(struct mvneta_port *pp,
2612 const struct mbus_dram_target_info *dram)
2613{
2614 u32 win_enable;
2615 u32 win_protect;
2616 int i;
2617
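	/* Clear all six address decoding windows (and the four remap
	 * registers) first, then open one window per DRAM chip select so
	 * that the controller's DMA engine can reach system memory.
	 */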
2618 for (i = 0; i < 6; i++) {
2619 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2620 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2621
2622 if (i < 4)
2623 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2624 }
2625
2626 win_enable = 0x3f;
2627 win_protect = 0;
2628
2629 for (i = 0; i < dram->num_cs; i++) {
2630 const struct mbus_dram_window *cs = dram->cs + i;
2631 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2632 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2633
2634 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2635 (cs->size - 1) & 0xffff0000);
2636
2637 win_enable &= ~(1 << i);
2638 win_protect |= 3 << (2 * i);
2639 }
2640
2641 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2642}
2643
2644/* Power up the port */
2645static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2646{
2647 u32 val;
2648
2649 /* MAC Cause register should be cleared */
2650 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2651
2652 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2653 mvneta_port_sgmii_config(pp);
2654
2655 mvneta_gmac_rgmii_set(pp, 1);
2656
2657 /* Cancel Port Reset */
2658 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2659 val &= ~MVNETA_GMAC2_PORT_RESET;
2660 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2661
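	/* Busy-wait until the port reset bit self-clears */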
2662 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2663 MVNETA_GMAC2_PORT_RESET) != 0)
2664 continue;
2665}
2666
2667/* Device initialization routine */
2668static int __devinit mvneta_probe(struct platform_device *pdev)
2669{
2670 const struct mbus_dram_target_info *dram_target_info;
2671 struct device_node *dn = pdev->dev.of_node;
2672 struct device_node *phy_node;
2673 u32 phy_addr, clk_rate_hz;
2674 struct mvneta_port *pp;
2675 struct net_device *dev;
2676 const char *mac_addr;
2677 int phy_mode;
2678 int err;
2679
2680 /*
2681 * Our multiqueue support is not complete, so for now, only
2682 * allow the usage of the first RX queue
2683 */
2684 if (rxq_def != 0) {
2685 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2686 return -EINVAL;
2687 }
2688
2689 dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
2690 if (!dev)
2691 return -ENOMEM;
2692
2693 dev->irq = irq_of_parse_and_map(dn, 0);
2694 if (dev->irq == 0) {
2695 err = -EINVAL;
2696 goto err_free_netdev;
2697 }
2698
2699 phy_node = of_parse_phandle(dn, "phy", 0);
2700 if (!phy_node) {
2701 dev_err(&pdev->dev, "no associated PHY\n");
2702 err = -ENODEV;
2703 goto err_free_irq;
2704 }
2705
2706 phy_mode = of_get_phy_mode(dn);
2707 if (phy_mode < 0) {
2708 dev_err(&pdev->dev, "incorrect phy-mode\n");
2709 err = -EINVAL;
2710 goto err_free_irq;
2711 }
2712
2713 if (of_property_read_u32(dn, "clock-frequency", &clk_rate_hz) != 0) {
2714 dev_err(&pdev->dev, "could not read clock-frequency\n");
2715 err = -EINVAL;
2716 goto err_free_irq;
2717 }
2718
2719 mac_addr = of_get_mac_address(dn);
2720
2721 if (!mac_addr || !is_valid_ether_addr(mac_addr))
2722 eth_hw_addr_random(dev);
2723 else
2724 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
2725
2726 dev->tx_queue_len = MVNETA_MAX_TXD;
2727 dev->watchdog_timeo = 5 * HZ;
2728 dev->netdev_ops = &mvneta_netdev_ops;
2729
2730 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2731
2732 pp = netdev_priv(dev);
2733
2734 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2735 init_timer(&pp->tx_done_timer);
2736 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2737
2738 pp->weight = MVNETA_RX_POLL_WEIGHT;
2739 pp->clk_rate_hz = clk_rate_hz;
2740 pp->phy_node = phy_node;
2741 pp->phy_interface = phy_mode;
2742
2743 pp->base = of_iomap(dn, 0);
2744 if (pp->base == NULL) {
2745 err = -ENOMEM;
2746 goto err_free_irq;
2747 }
2748
2749 pp->tx_done_timer.data = (unsigned long)dev;
2750
2751 pp->tx_ring_size = MVNETA_MAX_TXD;
2752 pp->rx_ring_size = MVNETA_MAX_RXD;
2753
2754 pp->dev = dev;
2755 SET_NETDEV_DEV(dev, &pdev->dev);
2756
2757 err = mvneta_init(pp, phy_addr);
2758 if (err < 0) {
2759 dev_err(&pdev->dev, "can't init eth hal\n");
2760 goto err_unmap;
2761 }
2762 mvneta_port_power_up(pp, phy_mode);
2763
2764 dram_target_info = mv_mbus_dram_info();
2765 if (dram_target_info)
2766 mvneta_conf_mbus_windows(pp, dram_target_info);
2767
2768 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2769
2770 err = register_netdev(dev);
2771 if (err < 0) {
2772 dev_err(&pdev->dev, "failed to register\n");
2773 goto err_deinit;
2774 }
2775
2776 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2777 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2778 dev->priv_flags |= IFF_UNICAST_FLT;
2779
2780 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2781
2782 platform_set_drvdata(pdev, pp->dev);
2783
2784 return 0;
2785
2786err_deinit:
2787 mvneta_deinit(pp);
2788err_unmap:
2789 iounmap(pp->base);
2790err_free_irq:
2791 irq_dispose_mapping(dev->irq);
2792err_free_netdev:
2793 free_netdev(dev);
2794 return err;
2795}
2796
2797/* Device removal routine */
2798static int __devexit mvneta_remove(struct platform_device *pdev)
2799{
2800 struct net_device *dev = platform_get_drvdata(pdev);
2801 struct mvneta_port *pp = netdev_priv(dev);
2802
2803 unregister_netdev(dev);
2804 mvneta_deinit(pp);
2805 iounmap(pp->base);
2806 irq_dispose_mapping(dev->irq);
2807 free_netdev(dev);
2808
2809 platform_set_drvdata(pdev, NULL);
2810
2811 return 0;
2812}
2813
2814static const struct of_device_id mvneta_match[] = {
2815 { .compatible = "marvell,armada-370-neta" },
2816 { }
2817};
2818MODULE_DEVICE_TABLE(of, mvneta_match);
2819
2820static struct platform_driver mvneta_driver = {
2821 .probe = mvneta_probe,
2822 .remove = __devexit_p(mvneta_remove),
2823 .driver = {
2824 .name = MVNETA_DRIVER_NAME,
2825 .of_match_table = mvneta_match,
2826 },
2827};
2828
2829module_platform_driver(mvneta_driver);
2830
2831MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2832MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2833MODULE_LICENSE("GPL");
2834
2835module_param(rxq_number, int, S_IRUGO);
2836module_param(txq_number, int, S_IRUGO);
2837
2838module_param(rxq_def, int, S_IRUGO);
2839module_param(txq_def, int, S_IRUGO);