2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <linux/delay.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/crc32.h>
31 #include <linux/ethtool.h>
32 #include <linux/mii.h>
33 #include <linux/bitops.h>
34 #include <linux/workqueue.h>
36 #include <asm/processor.h>
39 #include <asm/uaccess.h>
44 * Lack of dma_unmap_???? calls is intentional.
46 * API-correct usage requires additional support state information to be
47 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
48 * EMAC design (e.g. TX buffer passed from network stack can be split into
49 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
50 * maintaining such information will add additional overhead.
51 * Current DMA API implementation for 4xx processors only ensures cache coherency
52 * and dma_unmap_???? routines are empty and are likely to stay this way.
53 * I decided to omit dma_unmap_??? calls because I don't want to add additional
54 * complexity just for the sake of following some abstract API, when it doesn't
55 * add any real benefit to the driver. I understand that this decision maybe
56 * controversial, but I really tried to make code API-correct and efficient
57 * at the same time and didn't come up with code I liked :(. --ebs
60 #define DRV_NAME "emac"
61 #define DRV_VERSION "3.54"
62 #define DRV_DESC "PPC 4xx OCP EMAC driver"
64 MODULE_DESCRIPTION(DRV_DESC
);
66 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
67 MODULE_LICENSE("GPL");
70 * PPC64 doesn't (yet) have a cacheable_memcpy
73 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
76 /* minimum number of free TX descriptors required to wake up TX process */
77 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
79 /* If packet size is less than this number, we allocate small skb and copy packet
80 * contents into it instead of just sending original big skb up
82 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
84 /* Since multiple EMACs share MDIO lines in various ways, we need
85 * to avoid re-using the same PHY ID in cases where the arch didn't
86 * setup precise phy_map entries
88 * XXX This is something that needs to be reworked as we can have multiple
89 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
90 * probably require in that case to have explicit PHY IDs in the device-tree
92 static u32 busy_phy_map
;
93 static DEFINE_MUTEX(emac_phy_map_lock
);
95 /* This is the wait queue used to wait on any event related to probe, that
96 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
98 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait
);
100 /* Having stable interface names is a doomed idea. However, it would be nice
101 * if we didn't have completely random interface names at boot too :-) It's
102 * just a matter of making everybody's life easier. Since we are doing
103 * threaded probing, it's a bit harder though. The base idea here is that
104 * we make up a list of all emacs in the device-tree before we register the
105 * driver. Every emac will then wait for the previous one in the list to
106 * initialize before itself. We should also keep that list ordered by
108 * That list is only 4 entries long, meaning that additional EMACs don't
109 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
112 #define EMAC_BOOT_LIST_SIZE 4
113 static struct device_node
*emac_boot_list
[EMAC_BOOT_LIST_SIZE
];
115 /* How long should I wait for dependent devices ? */
116 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
118 /* I don't want to litter system log with timeout errors
119 * when we have brain-damaged PHY.
121 static inline void emac_report_timeout_error(struct emac_instance
*dev
,
125 printk(KERN_ERR
"%s: %s\n", dev
->ndev
->name
, error
);
128 /* PHY polling intervals */
129 #define PHY_POLL_LINK_ON HZ
130 #define PHY_POLL_LINK_OFF (HZ / 5)
132 /* Graceful stop timeouts in us.
133 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
135 #define STOP_TIMEOUT_10 1230
136 #define STOP_TIMEOUT_100 124
137 #define STOP_TIMEOUT_1000 13
138 #define STOP_TIMEOUT_1000_JUMBO 73
140 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
141 static const char emac_stats_keys
[EMAC_ETHTOOL_STATS_COUNT
][ETH_GSTRING_LEN
] = {
142 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
143 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
144 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
145 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
146 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
147 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
148 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
149 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
150 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
151 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
152 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
153 "tx_bd_excessive_collisions", "tx_bd_late_collision",
154 "tx_bd_multple_collisions", "tx_bd_single_collision",
155 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
159 static irqreturn_t
emac_irq(int irq
, void *dev_instance
);
160 static void emac_clean_tx_ring(struct emac_instance
*dev
);
161 static void __emac_set_multicast_list(struct emac_instance
*dev
);
163 static inline int emac_phy_supports_gige(int phy_mode
)
165 return phy_mode
== PHY_MODE_GMII
||
166 phy_mode
== PHY_MODE_RGMII
||
167 phy_mode
== PHY_MODE_TBI
||
168 phy_mode
== PHY_MODE_RTBI
;
171 static inline int emac_phy_gpcs(int phy_mode
)
173 return phy_mode
== PHY_MODE_TBI
||
174 phy_mode
== PHY_MODE_RTBI
;
177 static inline void emac_tx_enable(struct emac_instance
*dev
)
179 struct emac_regs __iomem
*p
= dev
->emacp
;
182 DBG(dev
, "tx_enable" NL
);
184 r
= in_be32(&p
->mr0
);
185 if (!(r
& EMAC_MR0_TXE
))
186 out_be32(&p
->mr0
, r
| EMAC_MR0_TXE
);
189 static void emac_tx_disable(struct emac_instance
*dev
)
191 struct emac_regs __iomem
*p
= dev
->emacp
;
194 DBG(dev
, "tx_disable" NL
);
196 r
= in_be32(&p
->mr0
);
197 if (r
& EMAC_MR0_TXE
) {
198 int n
= dev
->stop_timeout
;
199 out_be32(&p
->mr0
, r
& ~EMAC_MR0_TXE
);
200 while (!(in_be32(&p
->mr0
) & EMAC_MR0_TXI
) && n
) {
205 emac_report_timeout_error(dev
, "TX disable timeout");
209 static void emac_rx_enable(struct emac_instance
*dev
)
211 struct emac_regs __iomem
*p
= dev
->emacp
;
214 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
)))
217 DBG(dev
, "rx_enable" NL
);
219 r
= in_be32(&p
->mr0
);
220 if (!(r
& EMAC_MR0_RXE
)) {
221 if (unlikely(!(r
& EMAC_MR0_RXI
))) {
222 /* Wait if previous async disable is still in progress */
223 int n
= dev
->stop_timeout
;
224 while (!(r
= in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
229 emac_report_timeout_error(dev
,
230 "RX disable timeout");
232 out_be32(&p
->mr0
, r
| EMAC_MR0_RXE
);
238 static void emac_rx_disable(struct emac_instance
*dev
)
240 struct emac_regs __iomem
*p
= dev
->emacp
;
243 DBG(dev
, "rx_disable" NL
);
245 r
= in_be32(&p
->mr0
);
246 if (r
& EMAC_MR0_RXE
) {
247 int n
= dev
->stop_timeout
;
248 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
249 while (!(in_be32(&p
->mr0
) & EMAC_MR0_RXI
) && n
) {
254 emac_report_timeout_error(dev
, "RX disable timeout");
258 static inline void emac_netif_stop(struct emac_instance
*dev
)
260 netif_tx_lock_bh(dev
->ndev
);
262 netif_tx_unlock_bh(dev
->ndev
);
263 dev
->ndev
->trans_start
= jiffies
; /* prevent tx timeout */
264 mal_poll_disable(dev
->mal
, &dev
->commac
);
265 netif_tx_disable(dev
->ndev
);
268 static inline void emac_netif_start(struct emac_instance
*dev
)
270 netif_tx_lock_bh(dev
->ndev
);
272 if (dev
->mcast_pending
&& netif_running(dev
->ndev
))
273 __emac_set_multicast_list(dev
);
274 netif_tx_unlock_bh(dev
->ndev
);
276 netif_wake_queue(dev
->ndev
);
278 /* NOTE: unconditional netif_wake_queue is only appropriate
279 * so long as all callers are assured to have free tx slots
280 * (taken from tg3... though the case where that is wrong is
281 * not terribly harmful)
283 mal_poll_enable(dev
->mal
, &dev
->commac
);
286 static inline void emac_rx_disable_async(struct emac_instance
*dev
)
288 struct emac_regs __iomem
*p
= dev
->emacp
;
291 DBG(dev
, "rx_disable_async" NL
);
293 r
= in_be32(&p
->mr0
);
294 if (r
& EMAC_MR0_RXE
)
295 out_be32(&p
->mr0
, r
& ~EMAC_MR0_RXE
);
298 static int emac_reset(struct emac_instance
*dev
)
300 struct emac_regs __iomem
*p
= dev
->emacp
;
303 DBG(dev
, "reset" NL
);
305 if (!dev
->reset_failed
) {
306 /* 40x erratum suggests stopping RX channel before reset,
309 emac_rx_disable(dev
);
310 emac_tx_disable(dev
);
313 out_be32(&p
->mr0
, EMAC_MR0_SRST
);
314 while ((in_be32(&p
->mr0
) & EMAC_MR0_SRST
) && n
)
318 dev
->reset_failed
= 0;
321 emac_report_timeout_error(dev
, "reset timeout");
322 dev
->reset_failed
= 1;
327 static void emac_hash_mc(struct emac_instance
*dev
)
329 struct emac_regs __iomem
*p
= dev
->emacp
;
331 struct dev_mc_list
*dmi
;
333 DBG(dev
, "hash_mc %d" NL
, dev
->ndev
->mc_count
);
335 for (dmi
= dev
->ndev
->mc_list
; dmi
; dmi
= dmi
->next
) {
337 DBG2(dev
, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL
,
338 dmi
->dmi_addr
[0], dmi
->dmi_addr
[1], dmi
->dmi_addr
[2],
339 dmi
->dmi_addr
[3], dmi
->dmi_addr
[4], dmi
->dmi_addr
[5]);
341 bit
= 63 - (ether_crc(ETH_ALEN
, dmi
->dmi_addr
) >> 26);
342 gaht
[bit
>> 4] |= 0x8000 >> (bit
& 0x0f);
344 out_be32(&p
->gaht1
, gaht
[0]);
345 out_be32(&p
->gaht2
, gaht
[1]);
346 out_be32(&p
->gaht3
, gaht
[2]);
347 out_be32(&p
->gaht4
, gaht
[3]);
350 static inline u32
emac_iff2rmr(struct net_device
*ndev
)
352 struct emac_instance
*dev
= netdev_priv(ndev
);
355 r
= EMAC_RMR_SP
| EMAC_RMR_SFCS
| EMAC_RMR_IAE
| EMAC_RMR_BAE
;
357 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
362 if (ndev
->flags
& IFF_PROMISC
)
364 else if (ndev
->flags
& IFF_ALLMULTI
|| ndev
->mc_count
> 32)
366 else if (ndev
->mc_count
> 0)
372 static u32
__emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
374 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC_MR1_TR0_MULT
;
376 DBG2(dev
, "__emac_calc_base_mr1" NL
);
380 ret
|= EMAC_MR1_TFS_2K
;
383 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
384 dev
->ndev
->name
, tx_size
);
389 ret
|= EMAC_MR1_RFS_16K
;
392 ret
|= EMAC_MR1_RFS_4K
;
395 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
396 dev
->ndev
->name
, rx_size
);
402 static u32
__emac4_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
404 u32 ret
= EMAC_MR1_VLE
| EMAC_MR1_IST
| EMAC4_MR1_TR
|
405 EMAC4_MR1_OBCI(dev
->opb_bus_freq
);
407 DBG2(dev
, "__emac4_calc_base_mr1" NL
);
411 ret
|= EMAC4_MR1_TFS_4K
;
414 ret
|= EMAC4_MR1_TFS_2K
;
417 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
418 dev
->ndev
->name
, tx_size
);
423 ret
|= EMAC4_MR1_RFS_16K
;
426 ret
|= EMAC4_MR1_RFS_4K
;
429 ret
|= EMAC4_MR1_RFS_2K
;
432 printk(KERN_WARNING
"%s: Unknown Rx FIFO size %d\n",
433 dev
->ndev
->name
, rx_size
);
439 static u32
emac_calc_base_mr1(struct emac_instance
*dev
, int tx_size
, int rx_size
)
441 return emac_has_feature(dev
, EMAC_FTR_EMAC4
) ?
442 __emac4_calc_base_mr1(dev
, tx_size
, rx_size
) :
443 __emac_calc_base_mr1(dev
, tx_size
, rx_size
);
446 static inline u32
emac_calc_trtr(struct emac_instance
*dev
, unsigned int size
)
448 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
449 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4
;
451 return ((size
>> 6) - 1) << EMAC_TRTR_SHIFT
;
454 static inline u32
emac_calc_rwmr(struct emac_instance
*dev
,
455 unsigned int low
, unsigned int high
)
457 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
458 return (low
<< 22) | ( (high
& 0x3ff) << 6);
460 return (low
<< 23) | ( (high
& 0x1ff) << 7);
463 static int emac_configure(struct emac_instance
*dev
)
465 struct emac_regs __iomem
*p
= dev
->emacp
;
466 struct net_device
*ndev
= dev
->ndev
;
467 int tx_size
, rx_size
, link
= netif_carrier_ok(dev
->ndev
);
470 DBG(dev
, "configure" NL
);
473 out_be32(&p
->mr1
, in_be32(&p
->mr1
)
474 | EMAC_MR1_FDE
| EMAC_MR1_ILE
);
476 } else if (emac_reset(dev
) < 0)
479 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
480 tah_reset(dev
->tah_dev
);
482 DBG(dev
, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
483 link
, dev
->phy
.duplex
, dev
->phy
.pause
, dev
->phy
.asym_pause
);
485 /* Default fifo sizes */
486 tx_size
= dev
->tx_fifo_size
;
487 rx_size
= dev
->rx_fifo_size
;
489 /* No link, force loopback */
491 mr1
= EMAC_MR1_FDE
| EMAC_MR1_ILE
;
493 /* Check for full duplex */
494 else if (dev
->phy
.duplex
== DUPLEX_FULL
)
495 mr1
|= EMAC_MR1_FDE
| EMAC_MR1_MWSW_001
;
497 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
498 dev
->stop_timeout
= STOP_TIMEOUT_10
;
499 switch (dev
->phy
.speed
) {
501 if (emac_phy_gpcs(dev
->phy
.mode
)) {
502 mr1
|= EMAC_MR1_MF_1000GPCS
|
503 EMAC_MR1_MF_IPPA(dev
->phy
.address
);
505 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
506 * identify this GPCS PHY later.
508 out_be32(&p
->ipcr
, 0xdeadbeef);
510 mr1
|= EMAC_MR1_MF_1000
;
512 /* Extended fifo sizes */
513 tx_size
= dev
->tx_fifo_size_gige
;
514 rx_size
= dev
->rx_fifo_size_gige
;
516 if (dev
->ndev
->mtu
> ETH_DATA_LEN
) {
517 mr1
|= EMAC_MR1_JPSM
;
518 dev
->stop_timeout
= STOP_TIMEOUT_1000_JUMBO
;
520 dev
->stop_timeout
= STOP_TIMEOUT_1000
;
523 mr1
|= EMAC_MR1_MF_100
;
524 dev
->stop_timeout
= STOP_TIMEOUT_100
;
526 default: /* make gcc happy */
530 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
531 rgmii_set_speed(dev
->rgmii_dev
, dev
->rgmii_port
,
533 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
534 zmii_set_speed(dev
->zmii_dev
, dev
->zmii_port
, dev
->phy
.speed
);
536 /* on 40x erratum forces us to NOT use integrated flow control,
537 * let's hope it works on 44x ;)
539 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
) &&
540 dev
->phy
.duplex
== DUPLEX_FULL
) {
542 mr1
|= EMAC_MR1_EIFC
| EMAC_MR1_APP
;
543 else if (dev
->phy
.asym_pause
)
547 /* Add base settings & fifo sizes & program MR1 */
548 mr1
|= emac_calc_base_mr1(dev
, tx_size
, rx_size
);
549 out_be32(&p
->mr1
, mr1
);
551 /* Set individual MAC address */
552 out_be32(&p
->iahr
, (ndev
->dev_addr
[0] << 8) | ndev
->dev_addr
[1]);
553 out_be32(&p
->ialr
, (ndev
->dev_addr
[2] << 24) |
554 (ndev
->dev_addr
[3] << 16) | (ndev
->dev_addr
[4] << 8) |
557 /* VLAN Tag Protocol ID */
558 out_be32(&p
->vtpid
, 0x8100);
560 /* Receive mode register */
561 r
= emac_iff2rmr(ndev
);
562 if (r
& EMAC_RMR_MAE
)
564 out_be32(&p
->rmr
, r
);
566 /* FIFOs thresholds */
567 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
568 r
= EMAC4_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
569 tx_size
/ 2 / dev
->fifo_entry_size
);
571 r
= EMAC_TMR1((dev
->mal_burst_size
/ dev
->fifo_entry_size
) + 1,
572 tx_size
/ 2 / dev
->fifo_entry_size
);
573 out_be32(&p
->tmr1
, r
);
574 out_be32(&p
->trtr
, emac_calc_trtr(dev
, tx_size
/ 2));
576 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
577 there should be still enough space in FIFO to allow the our link
578 partner time to process this frame and also time to send PAUSE
581 Here is the worst case scenario for the RX FIFO "headroom"
582 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
584 1) One maximum-length frame on TX 1522 bytes
585 2) One PAUSE frame time 64 bytes
586 3) PAUSE frame decode time allowance 64 bytes
587 4) One maximum-length frame on RX 1522 bytes
588 5) Round-trip propagation delay of the link (100Mb) 15 bytes
592 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
593 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
595 r
= emac_calc_rwmr(dev
, rx_size
/ 8 / dev
->fifo_entry_size
,
596 rx_size
/ 4 / dev
->fifo_entry_size
);
597 out_be32(&p
->rwmr
, r
);
599 /* Set PAUSE timer to the maximum */
600 out_be32(&p
->ptr
, 0xffff);
603 r
= EMAC_ISR_OVR
| EMAC_ISR_BP
| EMAC_ISR_SE
|
604 EMAC_ISR_ALE
| EMAC_ISR_BFCS
| EMAC_ISR_PTLE
| EMAC_ISR_ORE
|
605 EMAC_ISR_IRE
| EMAC_ISR_TE
;
606 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
607 r
|= EMAC4_ISR_TXPE
| EMAC4_ISR_RXPE
/* | EMAC4_ISR_TXUE |
609 out_be32(&p
->iser
, r
);
611 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
612 if (emac_phy_gpcs(dev
->phy
.mode
))
613 emac_mii_reset_phy(&dev
->phy
);
618 static void emac_reinitialize(struct emac_instance
*dev
)
620 DBG(dev
, "reinitialize" NL
);
622 emac_netif_stop(dev
);
623 if (!emac_configure(dev
)) {
627 emac_netif_start(dev
);
630 static void emac_full_tx_reset(struct emac_instance
*dev
)
632 DBG(dev
, "full_tx_reset" NL
);
634 emac_tx_disable(dev
);
635 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
636 emac_clean_tx_ring(dev
);
637 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= 0;
641 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
646 static void emac_reset_work(struct work_struct
*work
)
648 struct emac_instance
*dev
= container_of(work
, struct emac_instance
, reset_work
);
650 DBG(dev
, "reset_work" NL
);
652 mutex_lock(&dev
->link_lock
);
654 emac_netif_stop(dev
);
655 emac_full_tx_reset(dev
);
656 emac_netif_start(dev
);
658 mutex_unlock(&dev
->link_lock
);
661 static void emac_tx_timeout(struct net_device
*ndev
)
663 struct emac_instance
*dev
= netdev_priv(ndev
);
665 DBG(dev
, "tx_timeout" NL
);
667 schedule_work(&dev
->reset_work
);
671 static inline int emac_phy_done(struct emac_instance
*dev
, u32 stacr
)
673 int done
= !!(stacr
& EMAC_STACR_OC
);
675 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
681 static int __emac_mdio_read(struct emac_instance
*dev
, u8 id
, u8 reg
)
683 struct emac_regs __iomem
*p
= dev
->emacp
;
685 int n
, err
= -ETIMEDOUT
;
687 mutex_lock(&dev
->mdio_lock
);
689 DBG2(dev
, "mdio_read(%02x,%02x)" NL
, id
, reg
);
691 /* Enable proper MDIO port */
692 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
693 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
694 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
695 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
697 /* Wait for management interface to become idle */
699 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
702 DBG2(dev
, " -> timeout wait idle\n");
707 /* Issue read command */
708 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
709 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
711 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
712 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
714 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
715 r
|= EMACX_STACR_STAC_READ
;
717 r
|= EMAC_STACR_STAC_READ
;
718 r
|= (reg
& EMAC_STACR_PRA_MASK
)
719 | ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
);
720 out_be32(&p
->stacr
, r
);
722 /* Wait for read to complete */
724 while (!emac_phy_done(dev
, (r
= in_be32(&p
->stacr
)))) {
727 DBG2(dev
, " -> timeout wait complete\n");
732 if (unlikely(r
& EMAC_STACR_PHYE
)) {
733 DBG(dev
, "mdio_read(%02x, %02x) failed" NL
, id
, reg
);
738 r
= ((r
>> EMAC_STACR_PHYD_SHIFT
) & EMAC_STACR_PHYD_MASK
);
740 DBG2(dev
, "mdio_read -> %04x" NL
, r
);
743 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
744 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
745 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
746 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
747 mutex_unlock(&dev
->mdio_lock
);
749 return err
== 0 ? r
: err
;
752 static void __emac_mdio_write(struct emac_instance
*dev
, u8 id
, u8 reg
,
755 struct emac_regs __iomem
*p
= dev
->emacp
;
757 int n
, err
= -ETIMEDOUT
;
759 mutex_lock(&dev
->mdio_lock
);
761 DBG2(dev
, "mdio_write(%02x,%02x,%04x)" NL
, id
, reg
, val
);
763 /* Enable proper MDIO port */
764 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
765 zmii_get_mdio(dev
->zmii_dev
, dev
->zmii_port
);
766 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
767 rgmii_get_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
769 /* Wait for management interface to be idle */
771 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
774 DBG2(dev
, " -> timeout wait idle\n");
779 /* Issue write command */
780 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
781 r
= EMAC4_STACR_BASE(dev
->opb_bus_freq
);
783 r
= EMAC_STACR_BASE(dev
->opb_bus_freq
);
784 if (emac_has_feature(dev
, EMAC_FTR_STACR_OC_INVERT
))
786 if (emac_has_feature(dev
, EMAC_FTR_HAS_NEW_STACR
))
787 r
|= EMACX_STACR_STAC_WRITE
;
789 r
|= EMAC_STACR_STAC_WRITE
;
790 r
|= (reg
& EMAC_STACR_PRA_MASK
) |
791 ((id
& EMAC_STACR_PCDA_MASK
) << EMAC_STACR_PCDA_SHIFT
) |
792 (val
<< EMAC_STACR_PHYD_SHIFT
);
793 out_be32(&p
->stacr
, r
);
795 /* Wait for write to complete */
797 while (!emac_phy_done(dev
, in_be32(&p
->stacr
))) {
800 DBG2(dev
, " -> timeout wait complete\n");
806 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
807 rgmii_put_mdio(dev
->rgmii_dev
, dev
->rgmii_port
);
808 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
809 zmii_put_mdio(dev
->zmii_dev
, dev
->zmii_port
);
810 mutex_unlock(&dev
->mdio_lock
);
813 static int emac_mdio_read(struct net_device
*ndev
, int id
, int reg
)
815 struct emac_instance
*dev
= netdev_priv(ndev
);
818 res
= __emac_mdio_read(dev
->mdio_instance
? dev
->mdio_instance
: dev
,
823 static void emac_mdio_write(struct net_device
*ndev
, int id
, int reg
, int val
)
825 struct emac_instance
*dev
= netdev_priv(ndev
);
827 __emac_mdio_write(dev
->mdio_instance
? dev
->mdio_instance
: dev
,
828 (u8
) id
, (u8
) reg
, (u16
) val
);
832 static void __emac_set_multicast_list(struct emac_instance
*dev
)
834 struct emac_regs __iomem
*p
= dev
->emacp
;
835 u32 rmr
= emac_iff2rmr(dev
->ndev
);
837 DBG(dev
, "__multicast %08x" NL
, rmr
);
839 /* I decided to relax register access rules here to avoid
842 * There is a real problem with EMAC4 core if we use MWSW_001 bit
843 * in MR1 register and do a full EMAC reset.
844 * One TX BD status update is delayed and, after EMAC reset, it
845 * never happens, resulting in TX hung (it'll be recovered by TX
846 * timeout handler eventually, but this is just gross).
847 * So we either have to do full TX reset or try to cheat here :)
849 * The only required change is to RX mode register, so I *think* all
850 * we need is just to stop RX channel. This seems to work on all
853 * If we need the full reset, we might just trigger the workqueue
854 * and do it async... a bit nasty but should work --BenH
856 dev
->mcast_pending
= 0;
857 emac_rx_disable(dev
);
858 if (rmr
& EMAC_RMR_MAE
)
860 out_be32(&p
->rmr
, rmr
);
865 static void emac_set_multicast_list(struct net_device
*ndev
)
867 struct emac_instance
*dev
= netdev_priv(ndev
);
869 DBG(dev
, "multicast" NL
);
871 BUG_ON(!netif_running(dev
->ndev
));
874 dev
->mcast_pending
= 1;
877 __emac_set_multicast_list(dev
);
880 static int emac_resize_rx_ring(struct emac_instance
*dev
, int new_mtu
)
882 int rx_sync_size
= emac_rx_sync_size(new_mtu
);
883 int rx_skb_size
= emac_rx_skb_size(new_mtu
);
886 mutex_lock(&dev
->link_lock
);
887 emac_netif_stop(dev
);
888 emac_rx_disable(dev
);
889 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
891 if (dev
->rx_sg_skb
) {
892 ++dev
->estats
.rx_dropped_resize
;
893 dev_kfree_skb(dev
->rx_sg_skb
);
894 dev
->rx_sg_skb
= NULL
;
897 /* Make a first pass over RX ring and mark BDs ready, dropping
898 * non-processed packets on the way. We need this as a separate pass
899 * to simplify error recovery in the case of allocation failure later.
901 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
902 if (dev
->rx_desc
[i
].ctrl
& MAL_RX_CTRL_FIRST
)
903 ++dev
->estats
.rx_dropped_resize
;
905 dev
->rx_desc
[i
].data_len
= 0;
906 dev
->rx_desc
[i
].ctrl
= MAL_RX_CTRL_EMPTY
|
907 (i
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
910 /* Reallocate RX ring only if bigger skb buffers are required */
911 if (rx_skb_size
<= dev
->rx_skb_size
)
914 /* Second pass, allocate new skbs */
915 for (i
= 0; i
< NUM_RX_BUFF
; ++i
) {
916 struct sk_buff
*skb
= alloc_skb(rx_skb_size
, GFP_ATOMIC
);
922 BUG_ON(!dev
->rx_skb
[i
]);
923 dev_kfree_skb(dev
->rx_skb
[i
]);
925 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
926 dev
->rx_desc
[i
].data_ptr
=
927 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, rx_sync_size
,
928 DMA_FROM_DEVICE
) + 2;
929 dev
->rx_skb
[i
] = skb
;
932 /* Check if we need to change "Jumbo" bit in MR1 */
933 if ((new_mtu
> ETH_DATA_LEN
) ^ (dev
->ndev
->mtu
> ETH_DATA_LEN
)) {
934 /* This is to prevent starting RX channel in emac_rx_enable() */
935 set_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
937 dev
->ndev
->mtu
= new_mtu
;
938 emac_full_tx_reset(dev
);
941 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(new_mtu
));
944 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
946 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
948 emac_netif_start(dev
);
949 mutex_unlock(&dev
->link_lock
);
954 /* Process ctx, rtnl_lock semaphore */
955 static int emac_change_mtu(struct net_device
*ndev
, int new_mtu
)
957 struct emac_instance
*dev
= netdev_priv(ndev
);
960 if (new_mtu
< EMAC_MIN_MTU
|| new_mtu
> dev
->max_mtu
)
963 DBG(dev
, "change_mtu(%d)" NL
, new_mtu
);
965 if (netif_running(ndev
)) {
966 /* Check if we really need to reinitalize RX ring */
967 if (emac_rx_skb_size(ndev
->mtu
) != emac_rx_skb_size(new_mtu
))
968 ret
= emac_resize_rx_ring(dev
, new_mtu
);
973 dev
->rx_skb_size
= emac_rx_skb_size(new_mtu
);
974 dev
->rx_sync_size
= emac_rx_sync_size(new_mtu
);
980 static void emac_clean_tx_ring(struct emac_instance
*dev
)
984 for (i
= 0; i
< NUM_TX_BUFF
; ++i
) {
985 if (dev
->tx_skb
[i
]) {
986 dev_kfree_skb(dev
->tx_skb
[i
]);
987 dev
->tx_skb
[i
] = NULL
;
988 if (dev
->tx_desc
[i
].ctrl
& MAL_TX_CTRL_READY
)
989 ++dev
->estats
.tx_dropped
;
991 dev
->tx_desc
[i
].ctrl
= 0;
992 dev
->tx_desc
[i
].data_ptr
= 0;
996 static void emac_clean_rx_ring(struct emac_instance
*dev
)
1000 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1001 if (dev
->rx_skb
[i
]) {
1002 dev
->rx_desc
[i
].ctrl
= 0;
1003 dev_kfree_skb(dev
->rx_skb
[i
]);
1004 dev
->rx_skb
[i
] = NULL
;
1005 dev
->rx_desc
[i
].data_ptr
= 0;
1008 if (dev
->rx_sg_skb
) {
1009 dev_kfree_skb(dev
->rx_sg_skb
);
1010 dev
->rx_sg_skb
= NULL
;
1014 static inline int emac_alloc_rx_skb(struct emac_instance
*dev
, int slot
,
1017 struct sk_buff
*skb
= alloc_skb(dev
->rx_skb_size
, flags
);
1021 dev
->rx_skb
[slot
] = skb
;
1022 dev
->rx_desc
[slot
].data_len
= 0;
1024 skb_reserve(skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1025 dev
->rx_desc
[slot
].data_ptr
=
1026 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2, dev
->rx_sync_size
,
1027 DMA_FROM_DEVICE
) + 2;
1029 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1030 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1035 static void emac_print_link_status(struct emac_instance
*dev
)
1037 if (netif_carrier_ok(dev
->ndev
))
1038 printk(KERN_INFO
"%s: link is up, %d %s%s\n",
1039 dev
->ndev
->name
, dev
->phy
.speed
,
1040 dev
->phy
.duplex
== DUPLEX_FULL
? "FDX" : "HDX",
1041 dev
->phy
.pause
? ", pause enabled" :
1042 dev
->phy
.asym_pause
? ", asymmetric pause enabled" : "");
1044 printk(KERN_INFO
"%s: link is down\n", dev
->ndev
->name
);
1047 /* Process ctx, rtnl_lock semaphore */
1048 static int emac_open(struct net_device
*ndev
)
1050 struct emac_instance
*dev
= netdev_priv(ndev
);
1053 DBG(dev
, "open" NL
);
1055 /* Setup error IRQ handler */
1056 err
= request_irq(dev
->emac_irq
, emac_irq
, 0, "EMAC", dev
);
1058 printk(KERN_ERR
"%s: failed to request IRQ %d\n",
1059 ndev
->name
, dev
->emac_irq
);
1063 /* Allocate RX ring */
1064 for (i
= 0; i
< NUM_RX_BUFF
; ++i
)
1065 if (emac_alloc_rx_skb(dev
, i
, GFP_KERNEL
)) {
1066 printk(KERN_ERR
"%s: failed to allocate RX ring\n",
1071 dev
->tx_cnt
= dev
->tx_slot
= dev
->ack_slot
= dev
->rx_slot
= 0;
1072 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1073 dev
->rx_sg_skb
= NULL
;
1075 mutex_lock(&dev
->link_lock
);
1078 /* Start PHY polling now.
1080 if (dev
->phy
.address
>= 0) {
1081 int link_poll_interval
;
1082 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1083 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1084 netif_carrier_on(dev
->ndev
);
1085 link_poll_interval
= PHY_POLL_LINK_ON
;
1087 netif_carrier_off(dev
->ndev
);
1088 link_poll_interval
= PHY_POLL_LINK_OFF
;
1090 dev
->link_polling
= 1;
1092 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1093 emac_print_link_status(dev
);
1095 netif_carrier_on(dev
->ndev
);
1097 emac_configure(dev
);
1098 mal_poll_add(dev
->mal
, &dev
->commac
);
1099 mal_enable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1100 mal_set_rcbs(dev
->mal
, dev
->mal_rx_chan
, emac_rx_size(ndev
->mtu
));
1101 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1102 emac_tx_enable(dev
);
1103 emac_rx_enable(dev
);
1104 emac_netif_start(dev
);
1106 mutex_unlock(&dev
->link_lock
);
1110 emac_clean_rx_ring(dev
);
1111 free_irq(dev
->emac_irq
, dev
);
1118 static int emac_link_differs(struct emac_instance
*dev
)
1120 u32 r
= in_be32(&dev
->emacp
->mr1
);
1122 int duplex
= r
& EMAC_MR1_FDE
? DUPLEX_FULL
: DUPLEX_HALF
;
1123 int speed
, pause
, asym_pause
;
1125 if (r
& EMAC_MR1_MF_1000
)
1127 else if (r
& EMAC_MR1_MF_100
)
1132 switch (r
& (EMAC_MR1_EIFC
| EMAC_MR1_APP
)) {
1133 case (EMAC_MR1_EIFC
| EMAC_MR1_APP
):
1142 pause
= asym_pause
= 0;
1144 return speed
!= dev
->phy
.speed
|| duplex
!= dev
->phy
.duplex
||
1145 pause
!= dev
->phy
.pause
|| asym_pause
!= dev
->phy
.asym_pause
;
1149 static void emac_link_timer(struct work_struct
*work
)
1151 struct emac_instance
*dev
=
1152 container_of((struct delayed_work
*)work
,
1153 struct emac_instance
, link_work
);
1154 int link_poll_interval
;
1156 mutex_lock(&dev
->link_lock
);
1157 DBG2(dev
, "link timer" NL
);
1162 if (dev
->phy
.def
->ops
->poll_link(&dev
->phy
)) {
1163 if (!netif_carrier_ok(dev
->ndev
)) {
1164 /* Get new link parameters */
1165 dev
->phy
.def
->ops
->read_link(&dev
->phy
);
1167 netif_carrier_on(dev
->ndev
);
1168 emac_netif_stop(dev
);
1169 emac_full_tx_reset(dev
);
1170 emac_netif_start(dev
);
1171 emac_print_link_status(dev
);
1173 link_poll_interval
= PHY_POLL_LINK_ON
;
1175 if (netif_carrier_ok(dev
->ndev
)) {
1176 netif_carrier_off(dev
->ndev
);
1177 netif_tx_disable(dev
->ndev
);
1178 emac_reinitialize(dev
);
1179 emac_print_link_status(dev
);
1181 link_poll_interval
= PHY_POLL_LINK_OFF
;
1183 schedule_delayed_work(&dev
->link_work
, link_poll_interval
);
1185 mutex_unlock(&dev
->link_lock
);
1188 static void emac_force_link_update(struct emac_instance
*dev
)
1190 netif_carrier_off(dev
->ndev
);
1192 if (dev
->link_polling
) {
1193 cancel_rearming_delayed_work(&dev
->link_work
);
1194 if (dev
->link_polling
)
1195 schedule_delayed_work(&dev
->link_work
, PHY_POLL_LINK_OFF
);
1199 /* Process ctx, rtnl_lock semaphore */
1200 static int emac_close(struct net_device
*ndev
)
1202 struct emac_instance
*dev
= netdev_priv(ndev
);
1204 DBG(dev
, "close" NL
);
1206 if (dev
->phy
.address
>= 0) {
1207 dev
->link_polling
= 0;
1208 cancel_rearming_delayed_work(&dev
->link_work
);
1210 mutex_lock(&dev
->link_lock
);
1211 emac_netif_stop(dev
);
1213 mutex_unlock(&dev
->link_lock
);
1215 emac_rx_disable(dev
);
1216 emac_tx_disable(dev
);
1217 mal_disable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1218 mal_disable_tx_channel(dev
->mal
, dev
->mal_tx_chan
);
1219 mal_poll_del(dev
->mal
, &dev
->commac
);
1221 emac_clean_tx_ring(dev
);
1222 emac_clean_rx_ring(dev
);
1224 free_irq(dev
->emac_irq
, dev
);
1229 static inline u16
emac_tx_csum(struct emac_instance
*dev
,
1230 struct sk_buff
*skb
)
1232 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
&&
1233 skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
1234 ++dev
->stats
.tx_packets_csum
;
1235 return EMAC_TX_CTRL_TAH_CSUM
;
1240 static inline int emac_xmit_finish(struct emac_instance
*dev
, int len
)
1242 struct emac_regs __iomem
*p
= dev
->emacp
;
1243 struct net_device
*ndev
= dev
->ndev
;
1245 /* Send the packet out. If the if makes a significant perf
1246 * difference, then we can store the TMR0 value in "dev"
1249 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
1250 out_be32(&p
->tmr0
, EMAC4_TMR0_XMIT
);
1252 out_be32(&p
->tmr0
, EMAC_TMR0_XMIT
);
1254 if (unlikely(++dev
->tx_cnt
== NUM_TX_BUFF
)) {
1255 netif_stop_queue(ndev
);
1256 DBG2(dev
, "stopped TX queue" NL
);
1259 ndev
->trans_start
= jiffies
;
1260 ++dev
->stats
.tx_packets
;
1261 dev
->stats
.tx_bytes
+= len
;
1267 static int emac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
1269 struct emac_instance
*dev
= netdev_priv(ndev
);
1270 unsigned int len
= skb
->len
;
1273 u16 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1274 MAL_TX_CTRL_LAST
| emac_tx_csum(dev
, skb
);
1276 slot
= dev
->tx_slot
++;
1277 if (dev
->tx_slot
== NUM_TX_BUFF
) {
1279 ctrl
|= MAL_TX_CTRL_WRAP
;
1282 DBG2(dev
, "xmit(%u) %d" NL
, len
, slot
);
1284 dev
->tx_skb
[slot
] = skb
;
1285 dev
->tx_desc
[slot
].data_ptr
= dma_map_single(&dev
->ofdev
->dev
,
1288 dev
->tx_desc
[slot
].data_len
= (u16
) len
;
1290 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1292 return emac_xmit_finish(dev
, len
);
1295 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1296 static inline int emac_xmit_split(struct emac_instance
*dev
, int slot
,
1297 u32 pd
, int len
, int last
, u16 base_ctrl
)
1300 u16 ctrl
= base_ctrl
;
1301 int chunk
= min(len
, MAL_MAX_TX_SIZE
);
1304 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1307 ctrl
|= MAL_TX_CTRL_LAST
;
1308 if (slot
== NUM_TX_BUFF
- 1)
1309 ctrl
|= MAL_TX_CTRL_WRAP
;
1311 dev
->tx_skb
[slot
] = NULL
;
1312 dev
->tx_desc
[slot
].data_ptr
= pd
;
1313 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1314 dev
->tx_desc
[slot
].ctrl
= ctrl
;
1325 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1326 static int emac_start_xmit_sg(struct sk_buff
*skb
, struct net_device
*ndev
)
1328 struct emac_instance
*dev
= netdev_priv(ndev
);
1329 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
1330 int len
= skb
->len
, chunk
;
1335 /* This is common "fast" path */
1336 if (likely(!nr_frags
&& len
<= MAL_MAX_TX_SIZE
))
1337 return emac_start_xmit(skb
, ndev
);
1339 len
-= skb
->data_len
;
1341 /* Note, this is only an *estimation*, we can still run out of empty
1342 * slots because of the additional fragmentation into
1343 * MAL_MAX_TX_SIZE-sized chunks
1345 if (unlikely(dev
->tx_cnt
+ nr_frags
+ mal_tx_chunks(len
) > NUM_TX_BUFF
))
1348 ctrl
= EMAC_TX_CTRL_GFCS
| EMAC_TX_CTRL_GP
| MAL_TX_CTRL_READY
|
1349 emac_tx_csum(dev
, skb
);
1350 slot
= dev
->tx_slot
;
1353 dev
->tx_skb
[slot
] = NULL
;
1354 chunk
= min(len
, MAL_MAX_TX_SIZE
);
1355 dev
->tx_desc
[slot
].data_ptr
= pd
=
1356 dma_map_single(&dev
->ofdev
->dev
, skb
->data
, len
, DMA_TO_DEVICE
);
1357 dev
->tx_desc
[slot
].data_len
= (u16
) chunk
;
1360 slot
= emac_xmit_split(dev
, slot
, pd
+ chunk
, len
, !nr_frags
,
1363 for (i
= 0; i
< nr_frags
; ++i
) {
1364 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
1367 if (unlikely(dev
->tx_cnt
+ mal_tx_chunks(len
) >= NUM_TX_BUFF
))
1370 pd
= dma_map_page(&dev
->ofdev
->dev
, frag
->page
, frag
->page_offset
, len
,
1373 slot
= emac_xmit_split(dev
, slot
, pd
, len
, i
== nr_frags
- 1,
1377 DBG2(dev
, "xmit_sg(%u) %d - %d" NL
, skb
->len
, dev
->tx_slot
, slot
);
1379 /* Attach skb to the last slot so we don't release it too early */
1380 dev
->tx_skb
[slot
] = skb
;
1382 /* Send the packet out */
1383 if (dev
->tx_slot
== NUM_TX_BUFF
- 1)
1384 ctrl
|= MAL_TX_CTRL_WRAP
;
1386 dev
->tx_desc
[dev
->tx_slot
].ctrl
= ctrl
;
1387 dev
->tx_slot
= (slot
+ 1) % NUM_TX_BUFF
;
1389 return emac_xmit_finish(dev
, skb
->len
);
1392 /* Well, too bad. Our previous estimation was overly optimistic.
1395 while (slot
!= dev
->tx_slot
) {
1396 dev
->tx_desc
[slot
].ctrl
= 0;
1399 slot
= NUM_TX_BUFF
- 1;
1401 ++dev
->estats
.tx_undo
;
1404 netif_stop_queue(ndev
);
1405 DBG2(dev
, "stopped TX queue" NL
);
1409 # define emac_start_xmit_sg emac_start_xmit
1410 #endif /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
1413 static void emac_parse_tx_error(struct emac_instance
*dev
, u16 ctrl
)
1415 struct emac_error_stats
*st
= &dev
->estats
;
1417 DBG(dev
, "BD TX error %04x" NL
, ctrl
);
1420 if (ctrl
& EMAC_TX_ST_BFCS
)
1421 ++st
->tx_bd_bad_fcs
;
1422 if (ctrl
& EMAC_TX_ST_LCS
)
1423 ++st
->tx_bd_carrier_loss
;
1424 if (ctrl
& EMAC_TX_ST_ED
)
1425 ++st
->tx_bd_excessive_deferral
;
1426 if (ctrl
& EMAC_TX_ST_EC
)
1427 ++st
->tx_bd_excessive_collisions
;
1428 if (ctrl
& EMAC_TX_ST_LC
)
1429 ++st
->tx_bd_late_collision
;
1430 if (ctrl
& EMAC_TX_ST_MC
)
1431 ++st
->tx_bd_multple_collisions
;
1432 if (ctrl
& EMAC_TX_ST_SC
)
1433 ++st
->tx_bd_single_collision
;
1434 if (ctrl
& EMAC_TX_ST_UR
)
1435 ++st
->tx_bd_underrun
;
1436 if (ctrl
& EMAC_TX_ST_SQE
)
1440 static void emac_poll_tx(void *param
)
1442 struct emac_instance
*dev
= param
;
1445 DBG2(dev
, "poll_tx, %d %d" NL
, dev
->tx_cnt
, dev
->ack_slot
);
1447 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
1448 bad_mask
= EMAC_IS_BAD_TX_TAH
;
1450 bad_mask
= EMAC_IS_BAD_TX
;
1452 netif_tx_lock_bh(dev
->ndev
);
1455 int slot
= dev
->ack_slot
, n
= 0;
1457 ctrl
= dev
->tx_desc
[slot
].ctrl
;
1458 if (!(ctrl
& MAL_TX_CTRL_READY
)) {
1459 struct sk_buff
*skb
= dev
->tx_skb
[slot
];
1464 dev
->tx_skb
[slot
] = NULL
;
1466 slot
= (slot
+ 1) % NUM_TX_BUFF
;
1468 if (unlikely(ctrl
& bad_mask
))
1469 emac_parse_tx_error(dev
, ctrl
);
1475 dev
->ack_slot
= slot
;
1476 if (netif_queue_stopped(dev
->ndev
) &&
1477 dev
->tx_cnt
< EMAC_TX_WAKEUP_THRESH
)
1478 netif_wake_queue(dev
->ndev
);
1480 DBG2(dev
, "tx %d pkts" NL
, n
);
1483 netif_tx_unlock_bh(dev
->ndev
);
1486 static inline void emac_recycle_rx_skb(struct emac_instance
*dev
, int slot
,
1489 struct sk_buff
*skb
= dev
->rx_skb
[slot
];
1491 DBG2(dev
, "recycle %d %d" NL
, slot
, len
);
1494 dma_map_single(&dev
->ofdev
->dev
, skb
->data
- 2,
1495 EMAC_DMA_ALIGN(len
+ 2), DMA_FROM_DEVICE
);
1497 dev
->rx_desc
[slot
].data_len
= 0;
1499 dev
->rx_desc
[slot
].ctrl
= MAL_RX_CTRL_EMPTY
|
1500 (slot
== (NUM_RX_BUFF
- 1) ? MAL_RX_CTRL_WRAP
: 0);
1503 static void emac_parse_rx_error(struct emac_instance
*dev
, u16 ctrl
)
1505 struct emac_error_stats
*st
= &dev
->estats
;
1507 DBG(dev
, "BD RX error %04x" NL
, ctrl
);
1510 if (ctrl
& EMAC_RX_ST_OE
)
1511 ++st
->rx_bd_overrun
;
1512 if (ctrl
& EMAC_RX_ST_BP
)
1513 ++st
->rx_bd_bad_packet
;
1514 if (ctrl
& EMAC_RX_ST_RP
)
1515 ++st
->rx_bd_runt_packet
;
1516 if (ctrl
& EMAC_RX_ST_SE
)
1517 ++st
->rx_bd_short_event
;
1518 if (ctrl
& EMAC_RX_ST_AE
)
1519 ++st
->rx_bd_alignment_error
;
1520 if (ctrl
& EMAC_RX_ST_BFCS
)
1521 ++st
->rx_bd_bad_fcs
;
1522 if (ctrl
& EMAC_RX_ST_PTL
)
1523 ++st
->rx_bd_packet_too_long
;
1524 if (ctrl
& EMAC_RX_ST_ORE
)
1525 ++st
->rx_bd_out_of_range
;
1526 if (ctrl
& EMAC_RX_ST_IRE
)
1527 ++st
->rx_bd_in_range
;
1530 static inline void emac_rx_csum(struct emac_instance
*dev
,
1531 struct sk_buff
*skb
, u16 ctrl
)
1533 #ifdef CONFIG_IBM_NEW_EMAC_TAH
1534 if (!ctrl
&& dev
->tah_dev
) {
1535 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1536 ++dev
->stats
.rx_packets_csum
;
1541 static inline int emac_rx_sg_append(struct emac_instance
*dev
, int slot
)
1543 if (likely(dev
->rx_sg_skb
!= NULL
)) {
1544 int len
= dev
->rx_desc
[slot
].data_len
;
1545 int tot_len
= dev
->rx_sg_skb
->len
+ len
;
1547 if (unlikely(tot_len
+ 2 > dev
->rx_skb_size
)) {
1548 ++dev
->estats
.rx_dropped_mtu
;
1549 dev_kfree_skb(dev
->rx_sg_skb
);
1550 dev
->rx_sg_skb
= NULL
;
1552 cacheable_memcpy(skb_tail_pointer(dev
->rx_sg_skb
),
1553 dev
->rx_skb
[slot
]->data
, len
);
1554 skb_put(dev
->rx_sg_skb
, len
);
1555 emac_recycle_rx_skb(dev
, slot
, len
);
1559 emac_recycle_rx_skb(dev
, slot
, 0);
1563 /* NAPI poll context */
1564 static int emac_poll_rx(void *param
, int budget
)
1566 struct emac_instance
*dev
= param
;
1567 int slot
= dev
->rx_slot
, received
= 0;
1569 DBG2(dev
, "poll_rx(%d)" NL
, budget
);
1572 while (budget
> 0) {
1574 struct sk_buff
*skb
;
1575 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1577 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1580 skb
= dev
->rx_skb
[slot
];
1582 len
= dev
->rx_desc
[slot
].data_len
;
1584 if (unlikely(!MAL_IS_SINGLE_RX(ctrl
)))
1587 ctrl
&= EMAC_BAD_RX_MASK
;
1588 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1589 emac_parse_rx_error(dev
, ctrl
);
1590 ++dev
->estats
.rx_dropped_error
;
1591 emac_recycle_rx_skb(dev
, slot
, 0);
1596 if (len
&& len
< EMAC_RX_COPY_THRESH
) {
1597 struct sk_buff
*copy_skb
=
1598 alloc_skb(len
+ EMAC_RX_SKB_HEADROOM
+ 2, GFP_ATOMIC
);
1599 if (unlikely(!copy_skb
))
1602 skb_reserve(copy_skb
, EMAC_RX_SKB_HEADROOM
+ 2);
1603 cacheable_memcpy(copy_skb
->data
- 2, skb
->data
- 2,
1605 emac_recycle_rx_skb(dev
, slot
, len
);
1607 } else if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
)))
1612 skb
->dev
= dev
->ndev
;
1613 skb
->protocol
= eth_type_trans(skb
, dev
->ndev
);
1614 emac_rx_csum(dev
, skb
, ctrl
);
1616 if (unlikely(netif_receive_skb(skb
) == NET_RX_DROP
))
1617 ++dev
->estats
.rx_dropped_stack
;
1619 ++dev
->stats
.rx_packets
;
1621 dev
->stats
.rx_bytes
+= len
;
1622 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1627 if (ctrl
& MAL_RX_CTRL_FIRST
) {
1628 BUG_ON(dev
->rx_sg_skb
);
1629 if (unlikely(emac_alloc_rx_skb(dev
, slot
, GFP_ATOMIC
))) {
1630 DBG(dev
, "rx OOM %d" NL
, slot
);
1631 ++dev
->estats
.rx_dropped_oom
;
1632 emac_recycle_rx_skb(dev
, slot
, 0);
1634 dev
->rx_sg_skb
= skb
;
1637 } else if (!emac_rx_sg_append(dev
, slot
) &&
1638 (ctrl
& MAL_RX_CTRL_LAST
)) {
1640 skb
= dev
->rx_sg_skb
;
1641 dev
->rx_sg_skb
= NULL
;
1643 ctrl
&= EMAC_BAD_RX_MASK
;
1644 if (unlikely(ctrl
&& ctrl
!= EMAC_RX_TAH_BAD_CSUM
)) {
1645 emac_parse_rx_error(dev
, ctrl
);
1646 ++dev
->estats
.rx_dropped_error
;
1654 DBG(dev
, "rx OOM %d" NL
, slot
);
1655 /* Drop the packet and recycle skb */
1656 ++dev
->estats
.rx_dropped_oom
;
1657 emac_recycle_rx_skb(dev
, slot
, 0);
1662 DBG2(dev
, "rx %d BDs" NL
, received
);
1663 dev
->rx_slot
= slot
;
1666 if (unlikely(budget
&& test_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
))) {
1668 if (!(dev
->rx_desc
[slot
].ctrl
& MAL_RX_CTRL_EMPTY
)) {
1669 DBG2(dev
, "rx restart" NL
);
1674 if (dev
->rx_sg_skb
) {
1675 DBG2(dev
, "dropping partial rx packet" NL
);
1676 ++dev
->estats
.rx_dropped_error
;
1677 dev_kfree_skb(dev
->rx_sg_skb
);
1678 dev
->rx_sg_skb
= NULL
;
1681 clear_bit(MAL_COMMAC_RX_STOPPED
, &dev
->commac
.flags
);
1682 mal_enable_rx_channel(dev
->mal
, dev
->mal_rx_chan
);
1683 emac_rx_enable(dev
);
1689 /* NAPI poll context */
1690 static int emac_peek_rx(void *param
)
1692 struct emac_instance
*dev
= param
;
1694 return !(dev
->rx_desc
[dev
->rx_slot
].ctrl
& MAL_RX_CTRL_EMPTY
);
1697 /* NAPI poll context */
1698 static int emac_peek_rx_sg(void *param
)
1700 struct emac_instance
*dev
= param
;
1702 int slot
= dev
->rx_slot
;
1704 u16 ctrl
= dev
->rx_desc
[slot
].ctrl
;
1705 if (ctrl
& MAL_RX_CTRL_EMPTY
)
1707 else if (ctrl
& MAL_RX_CTRL_LAST
)
1710 slot
= (slot
+ 1) % NUM_RX_BUFF
;
1712 /* I'm just being paranoid here :) */
1713 if (unlikely(slot
== dev
->rx_slot
))
1719 static void emac_rxde(void *param
)
1721 struct emac_instance
*dev
= param
;
1723 ++dev
->estats
.rx_stopped
;
1724 emac_rx_disable_async(dev
);
1728 static irqreturn_t
emac_irq(int irq
, void *dev_instance
)
1730 struct emac_instance
*dev
= dev_instance
;
1731 struct emac_regs __iomem
*p
= dev
->emacp
;
1732 struct emac_error_stats
*st
= &dev
->estats
;
1735 spin_lock(&dev
->lock
);
1737 isr
= in_be32(&p
->isr
);
1738 out_be32(&p
->isr
, isr
);
1740 DBG(dev
, "isr = %08x" NL
, isr
);
1742 if (isr
& EMAC4_ISR_TXPE
)
1744 if (isr
& EMAC4_ISR_RXPE
)
1746 if (isr
& EMAC4_ISR_TXUE
)
1748 if (isr
& EMAC4_ISR_RXOE
)
1749 ++st
->rx_fifo_overrun
;
1750 if (isr
& EMAC_ISR_OVR
)
1752 if (isr
& EMAC_ISR_BP
)
1753 ++st
->rx_bad_packet
;
1754 if (isr
& EMAC_ISR_RP
)
1755 ++st
->rx_runt_packet
;
1756 if (isr
& EMAC_ISR_SE
)
1757 ++st
->rx_short_event
;
1758 if (isr
& EMAC_ISR_ALE
)
1759 ++st
->rx_alignment_error
;
1760 if (isr
& EMAC_ISR_BFCS
)
1762 if (isr
& EMAC_ISR_PTLE
)
1763 ++st
->rx_packet_too_long
;
1764 if (isr
& EMAC_ISR_ORE
)
1765 ++st
->rx_out_of_range
;
1766 if (isr
& EMAC_ISR_IRE
)
1768 if (isr
& EMAC_ISR_SQE
)
1770 if (isr
& EMAC_ISR_TE
)
1773 spin_unlock(&dev
->lock
);
1778 static struct net_device_stats
*emac_stats(struct net_device
*ndev
)
1780 struct emac_instance
*dev
= netdev_priv(ndev
);
1781 struct emac_stats
*st
= &dev
->stats
;
1782 struct emac_error_stats
*est
= &dev
->estats
;
1783 struct net_device_stats
*nst
= &dev
->nstats
;
1784 unsigned long flags
;
1786 DBG2(dev
, "stats" NL
);
1788 /* Compute "legacy" statistics */
1789 spin_lock_irqsave(&dev
->lock
, flags
);
1790 nst
->rx_packets
= (unsigned long)st
->rx_packets
;
1791 nst
->rx_bytes
= (unsigned long)st
->rx_bytes
;
1792 nst
->tx_packets
= (unsigned long)st
->tx_packets
;
1793 nst
->tx_bytes
= (unsigned long)st
->tx_bytes
;
1794 nst
->rx_dropped
= (unsigned long)(est
->rx_dropped_oom
+
1795 est
->rx_dropped_error
+
1796 est
->rx_dropped_resize
+
1797 est
->rx_dropped_mtu
);
1798 nst
->tx_dropped
= (unsigned long)est
->tx_dropped
;
1800 nst
->rx_errors
= (unsigned long)est
->rx_bd_errors
;
1801 nst
->rx_fifo_errors
= (unsigned long)(est
->rx_bd_overrun
+
1802 est
->rx_fifo_overrun
+
1804 nst
->rx_frame_errors
= (unsigned long)(est
->rx_bd_alignment_error
+
1805 est
->rx_alignment_error
);
1806 nst
->rx_crc_errors
= (unsigned long)(est
->rx_bd_bad_fcs
+
1808 nst
->rx_length_errors
= (unsigned long)(est
->rx_bd_runt_packet
+
1809 est
->rx_bd_short_event
+
1810 est
->rx_bd_packet_too_long
+
1811 est
->rx_bd_out_of_range
+
1812 est
->rx_bd_in_range
+
1813 est
->rx_runt_packet
+
1814 est
->rx_short_event
+
1815 est
->rx_packet_too_long
+
1816 est
->rx_out_of_range
+
1819 nst
->tx_errors
= (unsigned long)(est
->tx_bd_errors
+ est
->tx_errors
);
1820 nst
->tx_fifo_errors
= (unsigned long)(est
->tx_bd_underrun
+
1822 nst
->tx_carrier_errors
= (unsigned long)est
->tx_bd_carrier_loss
;
1823 nst
->collisions
= (unsigned long)(est
->tx_bd_excessive_deferral
+
1824 est
->tx_bd_excessive_collisions
+
1825 est
->tx_bd_late_collision
+
1826 est
->tx_bd_multple_collisions
);
1827 spin_unlock_irqrestore(&dev
->lock
, flags
);
1831 static struct mal_commac_ops emac_commac_ops
= {
1832 .poll_tx
= &emac_poll_tx
,
1833 .poll_rx
= &emac_poll_rx
,
1834 .peek_rx
= &emac_peek_rx
,
1838 static struct mal_commac_ops emac_commac_sg_ops
= {
1839 .poll_tx
= &emac_poll_tx
,
1840 .poll_rx
= &emac_poll_rx
,
1841 .peek_rx
= &emac_peek_rx_sg
,
1845 /* Ethtool support */
1846 static int emac_ethtool_get_settings(struct net_device
*ndev
,
1847 struct ethtool_cmd
*cmd
)
1849 struct emac_instance
*dev
= netdev_priv(ndev
);
1851 cmd
->supported
= dev
->phy
.features
;
1852 cmd
->port
= PORT_MII
;
1853 cmd
->phy_address
= dev
->phy
.address
;
1855 dev
->phy
.address
>= 0 ? XCVR_EXTERNAL
: XCVR_INTERNAL
;
1857 mutex_lock(&dev
->link_lock
);
1858 cmd
->advertising
= dev
->phy
.advertising
;
1859 cmd
->autoneg
= dev
->phy
.autoneg
;
1860 cmd
->speed
= dev
->phy
.speed
;
1861 cmd
->duplex
= dev
->phy
.duplex
;
1862 mutex_unlock(&dev
->link_lock
);
1867 static int emac_ethtool_set_settings(struct net_device
*ndev
,
1868 struct ethtool_cmd
*cmd
)
1870 struct emac_instance
*dev
= netdev_priv(ndev
);
1871 u32 f
= dev
->phy
.features
;
1873 DBG(dev
, "set_settings(%d, %d, %d, 0x%08x)" NL
,
1874 cmd
->autoneg
, cmd
->speed
, cmd
->duplex
, cmd
->advertising
);
1876 /* Basic sanity checks */
1877 if (dev
->phy
.address
< 0)
1879 if (cmd
->autoneg
!= AUTONEG_ENABLE
&& cmd
->autoneg
!= AUTONEG_DISABLE
)
1881 if (cmd
->autoneg
== AUTONEG_ENABLE
&& cmd
->advertising
== 0)
1883 if (cmd
->duplex
!= DUPLEX_HALF
&& cmd
->duplex
!= DUPLEX_FULL
)
1886 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1887 switch (cmd
->speed
) {
1889 if (cmd
->duplex
== DUPLEX_HALF
1890 && !(f
& SUPPORTED_10baseT_Half
))
1892 if (cmd
->duplex
== DUPLEX_FULL
1893 && !(f
& SUPPORTED_10baseT_Full
))
1897 if (cmd
->duplex
== DUPLEX_HALF
1898 && !(f
& SUPPORTED_100baseT_Half
))
1900 if (cmd
->duplex
== DUPLEX_FULL
1901 && !(f
& SUPPORTED_100baseT_Full
))
1905 if (cmd
->duplex
== DUPLEX_HALF
1906 && !(f
& SUPPORTED_1000baseT_Half
))
1908 if (cmd
->duplex
== DUPLEX_FULL
1909 && !(f
& SUPPORTED_1000baseT_Full
))
1916 mutex_lock(&dev
->link_lock
);
1917 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, cmd
->speed
,
1919 mutex_unlock(&dev
->link_lock
);
1922 if (!(f
& SUPPORTED_Autoneg
))
1925 mutex_lock(&dev
->link_lock
);
1926 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
,
1927 (cmd
->advertising
& f
) |
1928 (dev
->phy
.advertising
&
1930 ADVERTISED_Asym_Pause
)));
1931 mutex_unlock(&dev
->link_lock
);
1933 emac_force_link_update(dev
);
1938 static void emac_ethtool_get_ringparam(struct net_device
*ndev
,
1939 struct ethtool_ringparam
*rp
)
1941 rp
->rx_max_pending
= rp
->rx_pending
= NUM_RX_BUFF
;
1942 rp
->tx_max_pending
= rp
->tx_pending
= NUM_TX_BUFF
;
1945 static void emac_ethtool_get_pauseparam(struct net_device
*ndev
,
1946 struct ethtool_pauseparam
*pp
)
1948 struct emac_instance
*dev
= netdev_priv(ndev
);
1950 mutex_lock(&dev
->link_lock
);
1951 if ((dev
->phy
.features
& SUPPORTED_Autoneg
) &&
1952 (dev
->phy
.advertising
& (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
)))
1955 if (dev
->phy
.duplex
== DUPLEX_FULL
) {
1957 pp
->rx_pause
= pp
->tx_pause
= 1;
1958 else if (dev
->phy
.asym_pause
)
1961 mutex_unlock(&dev
->link_lock
);
1964 static u32
emac_ethtool_get_rx_csum(struct net_device
*ndev
)
1966 struct emac_instance
*dev
= netdev_priv(ndev
);
1968 return dev
->tah_dev
!= NULL
;
1971 static int emac_get_regs_len(struct emac_instance
*dev
)
1973 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
))
1974 return sizeof(struct emac_ethtool_regs_subhdr
) +
1975 EMAC4_ETHTOOL_REGS_SIZE
;
1977 return sizeof(struct emac_ethtool_regs_subhdr
) +
1978 EMAC_ETHTOOL_REGS_SIZE
;
1981 static int emac_ethtool_get_regs_len(struct net_device
*ndev
)
1983 struct emac_instance
*dev
= netdev_priv(ndev
);
1986 size
= sizeof(struct emac_ethtool_regs_hdr
) +
1987 emac_get_regs_len(dev
) + mal_get_regs_len(dev
->mal
);
1988 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
1989 size
+= zmii_get_regs_len(dev
->zmii_dev
);
1990 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
1991 size
+= rgmii_get_regs_len(dev
->rgmii_dev
);
1992 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
1993 size
+= tah_get_regs_len(dev
->tah_dev
);
1998 static void *emac_dump_regs(struct emac_instance
*dev
, void *buf
)
2000 struct emac_ethtool_regs_subhdr
*hdr
= buf
;
2002 hdr
->index
= dev
->cell_index
;
2003 if (emac_has_feature(dev
, EMAC_FTR_EMAC4
)) {
2004 hdr
->version
= EMAC4_ETHTOOL_REGS_VER
;
2005 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC4_ETHTOOL_REGS_SIZE
);
2006 return ((void *)(hdr
+ 1) + EMAC4_ETHTOOL_REGS_SIZE
);
2008 hdr
->version
= EMAC_ETHTOOL_REGS_VER
;
2009 memcpy_fromio(hdr
+ 1, dev
->emacp
, EMAC_ETHTOOL_REGS_SIZE
);
2010 return ((void *)(hdr
+ 1) + EMAC_ETHTOOL_REGS_SIZE
);
2014 static void emac_ethtool_get_regs(struct net_device
*ndev
,
2015 struct ethtool_regs
*regs
, void *buf
)
2017 struct emac_instance
*dev
= netdev_priv(ndev
);
2018 struct emac_ethtool_regs_hdr
*hdr
= buf
;
2020 hdr
->components
= 0;
2023 buf
= mal_dump_regs(dev
->mal
, buf
);
2024 buf
= emac_dump_regs(dev
, buf
);
2025 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
)) {
2026 hdr
->components
|= EMAC_ETHTOOL_REGS_ZMII
;
2027 buf
= zmii_dump_regs(dev
->zmii_dev
, buf
);
2029 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
)) {
2030 hdr
->components
|= EMAC_ETHTOOL_REGS_RGMII
;
2031 buf
= rgmii_dump_regs(dev
->rgmii_dev
, buf
);
2033 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
)) {
2034 hdr
->components
|= EMAC_ETHTOOL_REGS_TAH
;
2035 buf
= tah_dump_regs(dev
->tah_dev
, buf
);
2039 static int emac_ethtool_nway_reset(struct net_device
*ndev
)
2041 struct emac_instance
*dev
= netdev_priv(ndev
);
2044 DBG(dev
, "nway_reset" NL
);
2046 if (dev
->phy
.address
< 0)
2049 mutex_lock(&dev
->link_lock
);
2050 if (!dev
->phy
.autoneg
) {
2055 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, dev
->phy
.advertising
);
2057 mutex_unlock(&dev
->link_lock
);
2058 emac_force_link_update(dev
);
2062 static int emac_ethtool_get_stats_count(struct net_device
*ndev
)
2064 return EMAC_ETHTOOL_STATS_COUNT
;
2067 static void emac_ethtool_get_strings(struct net_device
*ndev
, u32 stringset
,
2070 if (stringset
== ETH_SS_STATS
)
2071 memcpy(buf
, &emac_stats_keys
, sizeof(emac_stats_keys
));
2074 static void emac_ethtool_get_ethtool_stats(struct net_device
*ndev
,
2075 struct ethtool_stats
*estats
,
2078 struct emac_instance
*dev
= netdev_priv(ndev
);
2080 memcpy(tmp_stats
, &dev
->stats
, sizeof(dev
->stats
));
2081 tmp_stats
+= sizeof(dev
->stats
) / sizeof(u64
);
2082 memcpy(tmp_stats
, &dev
->estats
, sizeof(dev
->estats
));
2085 static void emac_ethtool_get_drvinfo(struct net_device
*ndev
,
2086 struct ethtool_drvinfo
*info
)
2088 struct emac_instance
*dev
= netdev_priv(ndev
);
2090 strcpy(info
->driver
, "ibm_emac");
2091 strcpy(info
->version
, DRV_VERSION
);
2092 info
->fw_version
[0] = '\0';
2093 sprintf(info
->bus_info
, "PPC 4xx EMAC-%d %s",
2094 dev
->cell_index
, dev
->ofdev
->node
->full_name
);
2095 info
->n_stats
= emac_ethtool_get_stats_count(ndev
);
2096 info
->regdump_len
= emac_ethtool_get_regs_len(ndev
);
2099 static const struct ethtool_ops emac_ethtool_ops
= {
2100 .get_settings
= emac_ethtool_get_settings
,
2101 .set_settings
= emac_ethtool_set_settings
,
2102 .get_drvinfo
= emac_ethtool_get_drvinfo
,
2104 .get_regs_len
= emac_ethtool_get_regs_len
,
2105 .get_regs
= emac_ethtool_get_regs
,
2107 .nway_reset
= emac_ethtool_nway_reset
,
2109 .get_ringparam
= emac_ethtool_get_ringparam
,
2110 .get_pauseparam
= emac_ethtool_get_pauseparam
,
2112 .get_rx_csum
= emac_ethtool_get_rx_csum
,
2114 .get_strings
= emac_ethtool_get_strings
,
2115 .get_stats_count
= emac_ethtool_get_stats_count
,
2116 .get_ethtool_stats
= emac_ethtool_get_ethtool_stats
,
2118 .get_link
= ethtool_op_get_link
,
2119 .get_tx_csum
= ethtool_op_get_tx_csum
,
2120 .get_sg
= ethtool_op_get_sg
,
2123 static int emac_ioctl(struct net_device
*ndev
, struct ifreq
*rq
, int cmd
)
2125 struct emac_instance
*dev
= netdev_priv(ndev
);
2126 uint16_t *data
= (uint16_t *) & rq
->ifr_ifru
;
2128 DBG(dev
, "ioctl %08x" NL
, cmd
);
2130 if (dev
->phy
.address
< 0)
2135 case SIOCDEVPRIVATE
:
2136 data
[0] = dev
->phy
.address
;
2139 case SIOCDEVPRIVATE
+ 1:
2140 data
[3] = emac_mdio_read(ndev
, dev
->phy
.address
, data
[1]);
2144 case SIOCDEVPRIVATE
+ 2:
2145 if (!capable(CAP_NET_ADMIN
))
2147 emac_mdio_write(ndev
, dev
->phy
.address
, data
[1], data
[2]);
2154 struct emac_depentry
{
2156 struct device_node
*node
;
2157 struct of_device
*ofdev
;
2161 #define EMAC_DEP_MAL_IDX 0
2162 #define EMAC_DEP_ZMII_IDX 1
2163 #define EMAC_DEP_RGMII_IDX 2
2164 #define EMAC_DEP_TAH_IDX 3
2165 #define EMAC_DEP_MDIO_IDX 4
2166 #define EMAC_DEP_PREV_IDX 5
2167 #define EMAC_DEP_COUNT 6
2169 static int __devinit
emac_check_deps(struct emac_instance
*dev
,
2170 struct emac_depentry
*deps
)
2173 struct device_node
*np
;
2175 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2176 /* no dependency on that item, allright */
2177 if (deps
[i
].phandle
== 0) {
2181 /* special case for blist as the dependency might go away */
2182 if (i
== EMAC_DEP_PREV_IDX
) {
2183 np
= *(dev
->blist
- 1);
2185 deps
[i
].phandle
= 0;
2189 if (deps
[i
].node
== NULL
)
2190 deps
[i
].node
= of_node_get(np
);
2192 if (deps
[i
].node
== NULL
)
2193 deps
[i
].node
= of_find_node_by_phandle(deps
[i
].phandle
);
2194 if (deps
[i
].node
== NULL
)
2196 if (deps
[i
].ofdev
== NULL
)
2197 deps
[i
].ofdev
= of_find_device_by_node(deps
[i
].node
);
2198 if (deps
[i
].ofdev
== NULL
)
2200 if (deps
[i
].drvdata
== NULL
)
2201 deps
[i
].drvdata
= dev_get_drvdata(&deps
[i
].ofdev
->dev
);
2202 if (deps
[i
].drvdata
!= NULL
)
2205 return (there
== EMAC_DEP_COUNT
);
2208 static void emac_put_deps(struct emac_instance
*dev
)
2211 of_dev_put(dev
->mal_dev
);
2213 of_dev_put(dev
->zmii_dev
);
2215 of_dev_put(dev
->rgmii_dev
);
2217 of_dev_put(dev
->mdio_dev
);
2219 of_dev_put(dev
->tah_dev
);
2222 static int __devinit
emac_of_bus_notify(struct notifier_block
*nb
,
2223 unsigned long action
, void *data
)
2225 /* We are only intereted in device addition */
2226 if (action
== BUS_NOTIFY_BOUND_DRIVER
)
2227 wake_up_all(&emac_probe_wait
);
2231 static struct notifier_block emac_of_bus_notifier
= {
2232 .notifier_call
= emac_of_bus_notify
2235 static int __devinit
emac_wait_deps(struct emac_instance
*dev
)
2237 struct emac_depentry deps
[EMAC_DEP_COUNT
];
2240 memset(&deps
, 0, sizeof(deps
));
2242 deps
[EMAC_DEP_MAL_IDX
].phandle
= dev
->mal_ph
;
2243 deps
[EMAC_DEP_ZMII_IDX
].phandle
= dev
->zmii_ph
;
2244 deps
[EMAC_DEP_RGMII_IDX
].phandle
= dev
->rgmii_ph
;
2246 deps
[EMAC_DEP_TAH_IDX
].phandle
= dev
->tah_ph
;
2248 deps
[EMAC_DEP_MDIO_IDX
].phandle
= dev
->mdio_ph
;
2249 if (dev
->blist
&& dev
->blist
> emac_boot_list
)
2250 deps
[EMAC_DEP_PREV_IDX
].phandle
= 0xffffffffu
;
2251 bus_register_notifier(&of_platform_bus_type
, &emac_of_bus_notifier
);
2252 wait_event_timeout(emac_probe_wait
,
2253 emac_check_deps(dev
, deps
),
2254 EMAC_PROBE_DEP_TIMEOUT
);
2255 bus_unregister_notifier(&of_platform_bus_type
, &emac_of_bus_notifier
);
2256 err
= emac_check_deps(dev
, deps
) ? 0 : -ENODEV
;
2257 for (i
= 0; i
< EMAC_DEP_COUNT
; i
++) {
2259 of_node_put(deps
[i
].node
);
2260 if (err
&& deps
[i
].ofdev
)
2261 of_dev_put(deps
[i
].ofdev
);
2264 dev
->mal_dev
= deps
[EMAC_DEP_MAL_IDX
].ofdev
;
2265 dev
->zmii_dev
= deps
[EMAC_DEP_ZMII_IDX
].ofdev
;
2266 dev
->rgmii_dev
= deps
[EMAC_DEP_RGMII_IDX
].ofdev
;
2267 dev
->tah_dev
= deps
[EMAC_DEP_TAH_IDX
].ofdev
;
2268 dev
->mdio_dev
= deps
[EMAC_DEP_MDIO_IDX
].ofdev
;
2270 if (deps
[EMAC_DEP_PREV_IDX
].ofdev
)
2271 of_dev_put(deps
[EMAC_DEP_PREV_IDX
].ofdev
);
2275 static int __devinit
emac_read_uint_prop(struct device_node
*np
, const char *name
,
2276 u32
*val
, int fatal
)
2279 const u32
*prop
= of_get_property(np
, name
, &len
);
2280 if (prop
== NULL
|| len
< sizeof(u32
)) {
2282 printk(KERN_ERR
"%s: missing %s property\n",
2283 np
->full_name
, name
);
2290 static int __devinit
emac_init_phy(struct emac_instance
*dev
)
2292 struct device_node
*np
= dev
->ofdev
->node
;
2293 struct net_device
*ndev
= dev
->ndev
;
2297 dev
->phy
.dev
= ndev
;
2298 dev
->phy
.mode
= dev
->phy_mode
;
2300 /* PHY-less configuration.
2301 * XXX I probably should move these settings to the dev tree
2303 if (dev
->phy_address
== 0xffffffff && dev
->phy_map
== 0xffffffff) {
2306 /* PHY-less configuration.
2307 * XXX I probably should move these settings to the dev tree
2309 dev
->phy
.address
= -1;
2310 dev
->phy
.features
= SUPPORTED_100baseT_Full
| SUPPORTED_MII
;
2316 mutex_lock(&emac_phy_map_lock
);
2317 phy_map
= dev
->phy_map
| busy_phy_map
;
2319 DBG(dev
, "PHY maps %08x %08x" NL
, dev
->phy_map
, busy_phy_map
);
2321 dev
->phy
.mdio_read
= emac_mdio_read
;
2322 dev
->phy
.mdio_write
= emac_mdio_write
;
2324 /* Configure EMAC with defaults so we can at least use MDIO
2325 * This is needed mostly for 440GX
2327 if (emac_phy_gpcs(dev
->phy
.mode
)) {
2329 * Make GPCS PHY address equal to EMAC index.
2330 * We probably should take into account busy_phy_map
2331 * and/or phy_map here.
2333 * Note that the busy_phy_map is currently global
2334 * while it should probably be per-ASIC...
2336 dev
->phy
.address
= dev
->cell_index
;
2339 emac_configure(dev
);
2341 if (dev
->phy_address
!= 0xffffffff)
2342 phy_map
= ~(1 << dev
->phy_address
);
2344 for (i
= 0; i
< 0x20; phy_map
>>= 1, ++i
)
2345 if (!(phy_map
& 1)) {
2347 busy_phy_map
|= 1 << i
;
2349 /* Quick check if there is a PHY at the address */
2350 r
= emac_mdio_read(dev
->ndev
, i
, MII_BMCR
);
2351 if (r
== 0xffff || r
< 0)
2353 if (!emac_mii_phy_probe(&dev
->phy
, i
))
2356 mutex_unlock(&emac_phy_map_lock
);
2358 printk(KERN_WARNING
"%s: can't find PHY!\n", np
->full_name
);
2363 if (dev
->phy
.def
->ops
->init
)
2364 dev
->phy
.def
->ops
->init(&dev
->phy
);
2366 /* Disable any PHY features not supported by the platform */
2367 dev
->phy
.def
->features
&= ~dev
->phy_feat_exc
;
2369 /* Setup initial link parameters */
2370 if (dev
->phy
.features
& SUPPORTED_Autoneg
) {
2371 adv
= dev
->phy
.features
;
2372 if (!emac_has_feature(dev
, EMAC_FTR_NO_FLOW_CONTROL_40x
))
2373 adv
|= ADVERTISED_Pause
| ADVERTISED_Asym_Pause
;
2374 /* Restart autonegotiation */
2375 dev
->phy
.def
->ops
->setup_aneg(&dev
->phy
, adv
);
2377 u32 f
= dev
->phy
.def
->features
;
2378 int speed
= SPEED_10
, fd
= DUPLEX_HALF
;
2380 /* Select highest supported speed/duplex */
2381 if (f
& SUPPORTED_1000baseT_Full
) {
2384 } else if (f
& SUPPORTED_1000baseT_Half
)
2386 else if (f
& SUPPORTED_100baseT_Full
) {
2389 } else if (f
& SUPPORTED_100baseT_Half
)
2391 else if (f
& SUPPORTED_10baseT_Full
)
2394 /* Force link parameters */
2395 dev
->phy
.def
->ops
->setup_forced(&dev
->phy
, speed
, fd
);
2400 static int __devinit
emac_init_config(struct emac_instance
*dev
)
2402 struct device_node
*np
= dev
->ofdev
->node
;
2405 const char *pm
, *phy_modes
[] = {
2407 [PHY_MODE_MII
] = "mii",
2408 [PHY_MODE_RMII
] = "rmii",
2409 [PHY_MODE_SMII
] = "smii",
2410 [PHY_MODE_RGMII
] = "rgmii",
2411 [PHY_MODE_TBI
] = "tbi",
2412 [PHY_MODE_GMII
] = "gmii",
2413 [PHY_MODE_RTBI
] = "rtbi",
2414 [PHY_MODE_SGMII
] = "sgmii",
2417 /* Read config from device-tree */
2418 if (emac_read_uint_prop(np
, "mal-device", &dev
->mal_ph
, 1))
2420 if (emac_read_uint_prop(np
, "mal-tx-channel", &dev
->mal_tx_chan
, 1))
2422 if (emac_read_uint_prop(np
, "mal-rx-channel", &dev
->mal_rx_chan
, 1))
2424 if (emac_read_uint_prop(np
, "cell-index", &dev
->cell_index
, 1))
2426 if (emac_read_uint_prop(np
, "max-frame-size", &dev
->max_mtu
, 0))
2427 dev
->max_mtu
= 1500;
2428 if (emac_read_uint_prop(np
, "rx-fifo-size", &dev
->rx_fifo_size
, 0))
2429 dev
->rx_fifo_size
= 2048;
2430 if (emac_read_uint_prop(np
, "tx-fifo-size", &dev
->tx_fifo_size
, 0))
2431 dev
->tx_fifo_size
= 2048;
2432 if (emac_read_uint_prop(np
, "rx-fifo-size-gige", &dev
->rx_fifo_size_gige
, 0))
2433 dev
->rx_fifo_size_gige
= dev
->rx_fifo_size
;
2434 if (emac_read_uint_prop(np
, "tx-fifo-size-gige", &dev
->tx_fifo_size_gige
, 0))
2435 dev
->tx_fifo_size_gige
= dev
->tx_fifo_size
;
2436 if (emac_read_uint_prop(np
, "phy-address", &dev
->phy_address
, 0))
2437 dev
->phy_address
= 0xffffffff;
2438 if (emac_read_uint_prop(np
, "phy-map", &dev
->phy_map
, 0))
2439 dev
->phy_map
= 0xffffffff;
2440 if (emac_read_uint_prop(np
->parent
, "clock-frequency", &dev
->opb_bus_freq
, 1))
2442 if (emac_read_uint_prop(np
, "tah-device", &dev
->tah_ph
, 0))
2444 if (emac_read_uint_prop(np
, "tah-channel", &dev
->tah_port
, 0))
2446 if (emac_read_uint_prop(np
, "mdio-device", &dev
->mdio_ph
, 0))
2448 if (emac_read_uint_prop(np
, "zmii-device", &dev
->zmii_ph
, 0))
2450 if (emac_read_uint_prop(np
, "zmii-channel", &dev
->zmii_port
, 0))
2451 dev
->zmii_port
= 0xffffffff;;
2452 if (emac_read_uint_prop(np
, "rgmii-device", &dev
->rgmii_ph
, 0))
2454 if (emac_read_uint_prop(np
, "rgmii-channel", &dev
->rgmii_port
, 0))
2455 dev
->rgmii_port
= 0xffffffff;;
2456 if (emac_read_uint_prop(np
, "fifo-entry-size", &dev
->fifo_entry_size
, 0))
2457 dev
->fifo_entry_size
= 16;
2458 if (emac_read_uint_prop(np
, "mal-burst-size", &dev
->mal_burst_size
, 0))
2459 dev
->mal_burst_size
= 256;
2461 /* PHY mode needs some decoding */
2462 dev
->phy_mode
= PHY_MODE_NA
;
2463 pm
= of_get_property(np
, "phy-mode", &plen
);
2466 for (i
= 0; i
< ARRAY_SIZE(phy_modes
); i
++)
2467 if (!strcasecmp(pm
, phy_modes
[i
])) {
2473 /* Backward compat with non-final DT */
2474 if (dev
->phy_mode
== PHY_MODE_NA
&& pm
!= NULL
&& plen
== 4) {
2475 u32 nmode
= *(const u32
*)pm
;
2476 if (nmode
> PHY_MODE_NA
&& nmode
<= PHY_MODE_SGMII
)
2477 dev
->phy_mode
= nmode
;
2480 /* Check EMAC version */
2481 if (of_device_is_compatible(np
, "ibm,emac4"))
2482 dev
->features
|= EMAC_FTR_EMAC4
;
2484 /* Fixup some feature bits based on the device tree */
2485 if (of_get_property(np
, "has-inverted-stacr-oc", NULL
))
2486 dev
->features
|= EMAC_FTR_STACR_OC_INVERT
;
2487 if (of_get_property(np
, "has-new-stacr-staopc", NULL
))
2488 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
;
2490 /* CAB lacks the appropriate properties */
2491 if (of_device_is_compatible(np
, "ibm,emac-axon"))
2492 dev
->features
|= EMAC_FTR_HAS_NEW_STACR
|
2493 EMAC_FTR_STACR_OC_INVERT
;
2495 /* Enable TAH/ZMII/RGMII features as found */
2496 if (dev
->tah_ph
!= 0) {
2497 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2498 dev
->features
|= EMAC_FTR_HAS_TAH
;
2500 printk(KERN_ERR
"%s: TAH support not enabled !\n",
2506 if (dev
->zmii_ph
!= 0) {
2507 #ifdef CONFIG_IBM_NEW_EMAC_ZMII
2508 dev
->features
|= EMAC_FTR_HAS_ZMII
;
2510 printk(KERN_ERR
"%s: ZMII support not enabled !\n",
2516 if (dev
->rgmii_ph
!= 0) {
2517 #ifdef CONFIG_IBM_NEW_EMAC_RGMII
2518 dev
->features
|= EMAC_FTR_HAS_RGMII
;
2520 printk(KERN_ERR
"%s: RGMII support not enabled !\n",
2526 /* Read MAC-address */
2527 p
= of_get_property(np
, "local-mac-address", NULL
);
2529 printk(KERN_ERR
"%s: Can't find local-mac-address property\n",
2533 memcpy(dev
->ndev
->dev_addr
, p
, 6);
2535 DBG(dev
, "features : 0x%08x / 0x%08x\n", dev
->features
, EMAC_FTRS_POSSIBLE
);
2536 DBG(dev
, "tx_fifo_size : %d (%d gige)\n", dev
->tx_fifo_size
, dev
->tx_fifo_size_gige
);
2537 DBG(dev
, "rx_fifo_size : %d (%d gige)\n", dev
->rx_fifo_size
, dev
->rx_fifo_size_gige
);
2538 DBG(dev
, "max_mtu : %d\n", dev
->max_mtu
);
2539 DBG(dev
, "OPB freq : %d\n", dev
->opb_bus_freq
);
2544 static int __devinit
emac_probe(struct of_device
*ofdev
,
2545 const struct of_device_id
*match
)
2547 struct net_device
*ndev
;
2548 struct emac_instance
*dev
;
2549 struct device_node
*np
= ofdev
->node
;
2550 struct device_node
**blist
= NULL
;
2553 /* Find ourselves in the bootlist if we are there */
2554 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
2555 if (emac_boot_list
[i
] == np
)
2556 blist
= &emac_boot_list
[i
];
2558 /* Allocate our net_device structure */
2560 ndev
= alloc_etherdev(sizeof(struct emac_instance
));
2562 printk(KERN_ERR
"%s: could not allocate ethernet device!\n",
2566 dev
= netdev_priv(ndev
);
2570 SET_NETDEV_DEV(ndev
, &ofdev
->dev
);
2572 /* Initialize some embedded data structures */
2573 mutex_init(&dev
->mdio_lock
);
2574 mutex_init(&dev
->link_lock
);
2575 spin_lock_init(&dev
->lock
);
2576 INIT_WORK(&dev
->reset_work
, emac_reset_work
);
2578 /* Init various config data based on device-tree */
2579 err
= emac_init_config(dev
);
2583 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2584 dev
->emac_irq
= irq_of_parse_and_map(np
, 0);
2585 dev
->wol_irq
= irq_of_parse_and_map(np
, 1);
2586 if (dev
->emac_irq
== NO_IRQ
) {
2587 printk(KERN_ERR
"%s: Can't map main interrupt\n", np
->full_name
);
2590 ndev
->irq
= dev
->emac_irq
;
2593 if (of_address_to_resource(np
, 0, &dev
->rsrc_regs
)) {
2594 printk(KERN_ERR
"%s: Can't get registers address\n",
2598 // TODO : request_mem_region
2599 dev
->emacp
= ioremap(dev
->rsrc_regs
.start
, sizeof(struct emac_regs
));
2600 if (dev
->emacp
== NULL
) {
2601 printk(KERN_ERR
"%s: Can't map device registers!\n",
2607 /* Wait for dependent devices */
2608 err
= emac_wait_deps(dev
);
2611 "%s: Timeout waiting for dependent devices\n",
2613 /* display more info about what's missing ? */
2616 dev
->mal
= dev_get_drvdata(&dev
->mal_dev
->dev
);
2617 if (dev
->mdio_dev
!= NULL
)
2618 dev
->mdio_instance
= dev_get_drvdata(&dev
->mdio_dev
->dev
);
2620 /* Register with MAL */
2621 dev
->commac
.ops
= &emac_commac_ops
;
2622 dev
->commac
.dev
= dev
;
2623 dev
->commac
.tx_chan_mask
= MAL_CHAN_MASK(dev
->mal_tx_chan
);
2624 dev
->commac
.rx_chan_mask
= MAL_CHAN_MASK(dev
->mal_rx_chan
);
2625 err
= mal_register_commac(dev
->mal
, &dev
->commac
);
2627 printk(KERN_ERR
"%s: failed to register with mal %s!\n",
2628 np
->full_name
, dev
->mal_dev
->node
->full_name
);
2631 dev
->rx_skb_size
= emac_rx_skb_size(ndev
->mtu
);
2632 dev
->rx_sync_size
= emac_rx_sync_size(ndev
->mtu
);
2634 /* Get pointers to BD rings */
2636 dev
->mal
->bd_virt
+ mal_tx_bd_offset(dev
->mal
, dev
->mal_tx_chan
);
2638 dev
->mal
->bd_virt
+ mal_rx_bd_offset(dev
->mal
, dev
->mal_rx_chan
);
2640 DBG(dev
, "tx_desc %p" NL
, dev
->tx_desc
);
2641 DBG(dev
, "rx_desc %p" NL
, dev
->rx_desc
);
2644 memset(dev
->tx_desc
, 0, NUM_TX_BUFF
* sizeof(struct mal_descriptor
));
2645 memset(dev
->rx_desc
, 0, NUM_RX_BUFF
* sizeof(struct mal_descriptor
));
2647 /* Attach to ZMII, if needed */
2648 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
) &&
2649 (err
= zmii_attach(dev
->zmii_dev
, dev
->zmii_port
, &dev
->phy_mode
)) != 0)
2650 goto err_unreg_commac
;
2652 /* Attach to RGMII, if needed */
2653 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
) &&
2654 (err
= rgmii_attach(dev
->rgmii_dev
, dev
->rgmii_port
, dev
->phy_mode
)) != 0)
2655 goto err_detach_zmii
;
2657 /* Attach to TAH, if needed */
2658 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
) &&
2659 (err
= tah_attach(dev
->tah_dev
, dev
->tah_port
)) != 0)
2660 goto err_detach_rgmii
;
2662 /* Set some link defaults before we can find out real parameters */
2663 dev
->phy
.speed
= SPEED_100
;
2664 dev
->phy
.duplex
= DUPLEX_FULL
;
2665 dev
->phy
.autoneg
= AUTONEG_DISABLE
;
2666 dev
->phy
.pause
= dev
->phy
.asym_pause
= 0;
2667 dev
->stop_timeout
= STOP_TIMEOUT_100
;
2668 INIT_DELAYED_WORK(&dev
->link_work
, emac_link_timer
);
2670 /* Find PHY if any */
2671 err
= emac_init_phy(dev
);
2673 goto err_detach_tah
;
2675 /* Fill in the driver function table */
2676 ndev
->open
= &emac_open
;
2677 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2679 ndev
->hard_start_xmit
= &emac_start_xmit_sg
;
2680 ndev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
;
2683 ndev
->hard_start_xmit
= &emac_start_xmit
;
2684 ndev
->tx_timeout
= &emac_tx_timeout
;
2685 ndev
->watchdog_timeo
= 5 * HZ
;
2686 ndev
->stop
= &emac_close
;
2687 ndev
->get_stats
= &emac_stats
;
2688 ndev
->set_multicast_list
= &emac_set_multicast_list
;
2689 ndev
->do_ioctl
= &emac_ioctl
;
2690 if (emac_phy_supports_gige(dev
->phy_mode
)) {
2691 ndev
->change_mtu
= &emac_change_mtu
;
2692 dev
->commac
.ops
= &emac_commac_sg_ops
;
2694 SET_ETHTOOL_OPS(ndev
, &emac_ethtool_ops
);
2696 netif_carrier_off(ndev
);
2697 netif_stop_queue(ndev
);
2699 err
= register_netdev(ndev
);
2701 printk(KERN_ERR
"%s: failed to register net device (%d)!\n",
2702 np
->full_name
, err
);
2703 goto err_detach_tah
;
2706 /* Set our drvdata last as we don't want them visible until we are
2710 dev_set_drvdata(&ofdev
->dev
, dev
);
2712 /* There's a new kid in town ! Let's tell everybody */
2713 wake_up_all(&emac_probe_wait
);
2717 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2718 ndev
->name
, dev
->cell_index
, np
->full_name
,
2719 ndev
->dev_addr
[0], ndev
->dev_addr
[1], ndev
->dev_addr
[2],
2720 ndev
->dev_addr
[3], ndev
->dev_addr
[4], ndev
->dev_addr
[5]);
2722 if (dev
->phy
.address
>= 0)
2723 printk("%s: found %s PHY (0x%02x)\n", ndev
->name
,
2724 dev
->phy
.def
->name
, dev
->phy
.address
);
2726 emac_dbg_register(dev
);
2731 /* I have a bad feeling about this ... */
2734 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2735 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2737 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2738 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2740 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2741 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2743 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2747 iounmap(dev
->emacp
);
2749 if (dev
->wol_irq
!= NO_IRQ
)
2750 irq_dispose_mapping(dev
->wol_irq
);
2751 if (dev
->emac_irq
!= NO_IRQ
)
2752 irq_dispose_mapping(dev
->emac_irq
);
2756 /* if we were on the bootlist, remove us as we won't show up and
2757 * wake up all waiters to notify them in case they were waiting
2762 wake_up_all(&emac_probe_wait
);
2767 static int __devexit
emac_remove(struct of_device
*ofdev
)
2769 struct emac_instance
*dev
= dev_get_drvdata(&ofdev
->dev
);
2771 DBG(dev
, "remove" NL
);
2773 dev_set_drvdata(&ofdev
->dev
, NULL
);
2775 unregister_netdev(dev
->ndev
);
2777 flush_scheduled_work();
2779 if (emac_has_feature(dev
, EMAC_FTR_HAS_TAH
))
2780 tah_detach(dev
->tah_dev
, dev
->tah_port
);
2781 if (emac_has_feature(dev
, EMAC_FTR_HAS_RGMII
))
2782 rgmii_detach(dev
->rgmii_dev
, dev
->rgmii_port
);
2783 if (emac_has_feature(dev
, EMAC_FTR_HAS_ZMII
))
2784 zmii_detach(dev
->zmii_dev
, dev
->zmii_port
);
2786 mal_unregister_commac(dev
->mal
, &dev
->commac
);
2789 emac_dbg_unregister(dev
);
2790 iounmap(dev
->emacp
);
2792 if (dev
->wol_irq
!= NO_IRQ
)
2793 irq_dispose_mapping(dev
->wol_irq
);
2794 if (dev
->emac_irq
!= NO_IRQ
)
2795 irq_dispose_mapping(dev
->emac_irq
);
/*
 * Device-tree match table for the EMAC of_platform driver.
 * NOTE(review): the extraction dropped the entry braces and the empty
 * terminating sentinel entry — restore from pristine source.
 */
2802 /* XXX Features in here should be replaced by properties... */
2803 static struct of_device_id emac_match
[] =
2807 .compatible
= "ibm,emac",
2811 .compatible
= "ibm,emac4",
/*
 * of_platform driver glue binding emac_match entries to probe/remove.
 * NOTE(review): extraction appears to have dropped some initializer
 * lines (e.g. a .name field) — verify against pristine source.
 */
2816 static struct of_platform_driver emac_driver
= {
2818 .match_table
= emac_match
,
2820 .probe
= emac_probe
,
2821 .remove
= emac_remove
,
/*
 * emac_make_bootlist() - scan the whole device tree for EMAC nodes and
 * collect up to EMAC_BOOT_LIST_SIZE of them into emac_boot_list[], then
 * order the list by each node's "cell-index" property so probing happens
 * in a stable, hardware-defined order.
 *
 * NOTE(review): mangled extraction — `max` is used below but the line
 * assigning it (presumably `max = i;`) is missing, as are the `continue`
 * statements for the non-matching / "unused" / missing-cell-index cases
 * and several closing braces.  Comments only added; code bytes untouched.
 */
2824 static void __init
emac_make_bootlist(void)
2826 struct device_node
*np
= NULL
;
2827 int j
, max
, i
= 0, k
;
2828 int cell_indices
[EMAC_BOOT_LIST_SIZE
];
/* Collect EMAC nodes: skip non-matching and explicitly "unused" ones. */
2831 while((np
= of_find_all_nodes(np
)) != NULL
) {
2834 if (of_match_node(emac_match
, np
) == NULL
)
2836 if (of_get_property(np
, "unused", NULL
))
2838 idx
= of_get_property(np
, "cell-index", NULL
);
2841 cell_indices
[i
] = *idx
;
/* of_node_get(): hold a reference for the lifetime of the bootlist. */
2842 emac_boot_list
[i
++] = of_node_get(np
);
2843 if (i
>= EMAC_BOOT_LIST_SIZE
) {
2850 /* Bubble sort them (doh, what a creative algorithm :-) */
2851 for (i
= 0; max
> 1 && (i
< (max
- 1)); i
++)
2852 for (j
= i
; j
< max
; j
++) {
2853 if (cell_indices
[i
] > cell_indices
[j
]) {
/* Swap both the node pointers and their cell indices together. */
2854 np
= emac_boot_list
[i
];
2855 emac_boot_list
[i
] = emac_boot_list
[j
];
2856 emac_boot_list
[j
] = np
;
2857 k
= cell_indices
[i
];
2858 cell_indices
[i
] = cell_indices
[j
];
2859 cell_indices
[j
] = k
;
/*
 * emac_init() - module entry point: build the boot list, then register
 * the of_platform driver.
 * NOTE(review): heavily truncated extraction — the debug init, submodule
 * (mal/zmii/rgmii/tah) init calls, their error-unwind paths and the final
 * return are missing.  Comments only added; code bytes untouched.
 */
2864 static int __init
emac_init(void)
2868 printk(KERN_INFO DRV_DESC
", version " DRV_VERSION
"\n");
2870 /* Init debug stuff */
2873 /* Build EMAC boot list */
2874 emac_make_bootlist();
2876 /* Init submodules */
2889 rc
= of_register_platform_driver(&emac_driver
);
/*
 * emac_exit() - module exit: unregister the driver and drop the node
 * references taken by emac_make_bootlist().
 * NOTE(review): truncated extraction — submodule exit calls and debug
 * teardown are missing.  Comments only added; code bytes untouched.
 */
2907 static void __exit
emac_exit(void)
2911 of_unregister_platform_driver(&emac_driver
);
2919 /* Destroy EMAC boot list */
/* of_node_put() balances the of_node_get() done while building the list. */
2920 for (i
= 0; i
< EMAC_BOOT_LIST_SIZE
; i
++)
2921 if (emac_boot_list
[i
])
2922 of_node_put(emac_boot_list
[i
]);
/* Module entry/exit registration. */
2925 module_init(emac_init
);
2926 module_exit(emac_exit
);