1/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
2 * sungem.c: Sun GEM ethernet driver.
3 *
4 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5 *
6 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
7 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
8 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
9 *
10 * NAPI and NETPOLL support
11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
12 *
13 * TODO:
14 * - Now that the driver has been significantly simplified, I need to rework
15 * the locking. I'm sure we don't need _2_ spinlocks, and we probably
16 * can avoid taking most of them for such long periods of time (and schedule
17 * instead). The main issues at this point are caused by the netdev layer
18 * though:
19 *
20 * gem_change_mtu() and gem_set_multicast() are called with a read_lock()
21 * held by net/core/dev.c, thus they can't schedule. That means they can't
22 * call netif_poll_disable() either, which forces gem_poll() to keep a
23 * spinlock where it could have been dropped. change_mtu especially would
24 * also love to be able to msleep instead of horrid locked delays when
25 * resetting the HW, but that read_lock() makes it impossible, unless I
26 * defer its action to the reset task, which means it'll be asynchronous
27 * (won't take effect until the system schedules a bit).
28 *
29 * Also, it would probably be possible to remove most of the long-life
30 * locking in the open/resume code path (gem_reinit_chip) by being more
31 * careful about when we can start taking interrupts or get xmit() called...
32 */
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/fcntl.h>
38#include <linux/interrupt.h>
39#include <linux/ioport.h>
40#include <linux/in.h>
41#include <linux/slab.h>
42#include <linux/string.h>
43#include <linux/delay.h>
44#include <linux/init.h>
45#include <linux/errno.h>
46#include <linux/pci.h>
47#include <linux/dma-mapping.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/mii.h>
52#include <linux/ethtool.h>
53#include <linux/crc32.h>
54#include <linux/random.h>
55#include <linux/workqueue.h>
56#include <linux/if_vlan.h>
57#include <linux/bitops.h>
58
59#include <asm/system.h>
60#include <asm/io.h>
61#include <asm/byteorder.h>
62#include <asm/uaccess.h>
63#include <asm/irq.h>
64
65#ifdef __sparc__
66#include <asm/idprom.h>
67#include <asm/openprom.h>
68#include <asm/oplib.h>
69#include <asm/pbm.h>
70#endif
71
72#ifdef CONFIG_PPC_PMAC
73#include <asm/pci-bridge.h>
74#include <asm/prom.h>
75#include <asm/machdep.h>
76#include <asm/pmac_feature.h>
77#endif
78
79#include "sungem_phy.h"
80#include "sungem.h"
81
82/* Stripping FCS is causing problems, disabled for now */
83#undef STRIP_FCS
84
85#define DEFAULT_MSG (NETIF_MSG_DRV | \
86 NETIF_MSG_PROBE | \
87 NETIF_MSG_LINK)
88
89#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
90 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
91 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
92
93#define DRV_NAME "sungem"
94#define DRV_VERSION "0.98"
95#define DRV_RELDATE "8/24/03"
96#define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
97
98static char version[] __devinitdata =
99 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
100
101MODULE_AUTHOR(DRV_AUTHOR);
102MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
103MODULE_LICENSE("GPL");
104
105#define GEM_MODULE_NAME "gem"
106#define PFX GEM_MODULE_NAME ": "
107
108static struct pci_device_id gem_pci_tbl[] = {
109 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
111
112 /* These models only differ from the original GEM in
113 * that their tx/rx fifos are of a different size and
114 * they only support 10/100 speeds. -DaveM
115 *
116 * Apple's GMAC does support gigabit on machines with
117 * the BCM54xx PHYs. -BenH
118 */
119 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
121 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
123 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
125 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
127 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
129 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
131 {0, }
132};
133
134MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
135
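/* Issue an IEEE 802.3 clause-22 MDIO read frame through the MIF frame
 * register: start bits (01) and the read opcode (10) in the top bits,
 * then the PHY and register addresses. The chip sets the turnaround LSB
 * once the PHY's data has been shifted in, so we poll for that below.
 */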
136static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
137{
138 u32 cmd;
139 int limit = 10000;
140
141 cmd = (1 << 30);
142 cmd |= (2 << 28);
143 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
144 cmd |= (reg << 18) & MIF_FRAME_REGAD;
145 cmd |= (MIF_FRAME_TAMSB);
146 writel(cmd, gp->regs + MIF_FRAME);
147
148 while (limit--) {
149 cmd = readl(gp->regs + MIF_FRAME);
150 if (cmd & MIF_FRAME_TALSB)
151 break;
152
153 udelay(10);
154 }
155
156 if (limit < 0)
157 cmd = 0xffff;
158
159 return cmd & MIF_FRAME_DATA;
160}
161
162static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
163{
164 struct gem *gp = dev->priv;
165 return __phy_read(gp, mii_id, reg);
166}
167
168static inline u16 phy_read(struct gem *gp, int reg)
169{
170 return __phy_read(gp, gp->mii_phy_addr, reg);
171}
172
173static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
174{
175 u32 cmd;
176 int limit = 10000;
177
178 cmd = (1 << 30);
179 cmd |= (1 << 28);
180 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
181 cmd |= (reg << 18) & MIF_FRAME_REGAD;
182 cmd |= (MIF_FRAME_TAMSB);
183 cmd |= (val & MIF_FRAME_DATA);
184 writel(cmd, gp->regs + MIF_FRAME);
185
186 while (limit--) {
187 cmd = readl(gp->regs + MIF_FRAME);
188 if (cmd & MIF_FRAME_TALSB)
189 break;
190
191 udelay(10);
192 }
193}
194
195static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
196{
197 struct gem *gp = dev->priv;
198 __phy_write(gp, mii_id, reg, val & 0xffff);
199}
200
201static inline void phy_write(struct gem *gp, int reg, u16 val)
202{
203 __phy_write(gp, gp->mii_phy_addr, reg, val);
204}
205
206static inline void gem_enable_ints(struct gem *gp)
207{
208 /* Enable all interrupts but TXDONE */
209 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
210}
211
212static inline void gem_disable_ints(struct gem *gp)
213{
214 /* Disable all interrupts, including TXDONE */
215 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
216}
217
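/* Turn on the chip's clock cell. Calls are refcounted via cell_enabled,
 * so the PowerMac feature call is only made on the 0 -> 1 transition
 * (and gem_put_cell() only powers the cell down again on 1 -> 0).
 */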
218static void gem_get_cell(struct gem *gp)
219{
220 BUG_ON(gp->cell_enabled < 0);
221 gp->cell_enabled++;
222#ifdef CONFIG_PPC_PMAC
223 if (gp->cell_enabled == 1) {
224 mb();
225 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
226 udelay(10);
227 }
228#endif /* CONFIG_PPC_PMAC */
229}
230
231/* Turn off the chip's clock */
232static void gem_put_cell(struct gem *gp)
233{
234 BUG_ON(gp->cell_enabled <= 0);
235 gp->cell_enabled--;
236#ifdef CONFIG_PPC_PMAC
237 if (gp->cell_enabled == 0) {
238 mb();
239 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
240 udelay(10);
241 }
242#endif /* CONFIG_PPC_PMAC */
243}
244
245static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
246{
247 if (netif_msg_intr(gp))
248 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
249}
250
251static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
252{
253 u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
254 u32 pcs_miistat;
255
256 if (netif_msg_intr(gp))
257 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
258 gp->dev->name, pcs_istat);
259
260 if (!(pcs_istat & PCS_ISTAT_LSC)) {
261 printk(KERN_ERR "%s: PCS irq but no link status change???\n",
262 dev->name);
263 return 0;
264 }
265
266 /* The link status bit latches on zero, so you must
267 * read it twice in such a case to see a transition
268 * to the link being up.
269 */
270 pcs_miistat = readl(gp->regs + PCS_MIISTAT);
271 if (!(pcs_miistat & PCS_MIISTAT_LS))
272 pcs_miistat |=
273 (readl(gp->regs + PCS_MIISTAT) &
274 PCS_MIISTAT_LS);
275
276 if (pcs_miistat & PCS_MIISTAT_ANC) {
277 /* The remote-fault indication is only valid
278 * when autoneg has completed.
279 */
280 if (pcs_miistat & PCS_MIISTAT_RF)
281 printk(KERN_INFO "%s: PCS AutoNEG complete, "
282 "RemoteFault\n", dev->name);
283 else
284 printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
285 dev->name);
286 }
287
288 if (pcs_miistat & PCS_MIISTAT_LS) {
289 printk(KERN_INFO "%s: PCS link is now up.\n",
290 dev->name);
291 netif_carrier_on(gp->dev);
292 } else {
293 printk(KERN_INFO "%s: PCS link is now down.\n",
294 dev->name);
295 netif_carrier_off(gp->dev);
296 /* If this happens and the link timer is not running,
297 * reset so we re-negotiate.
298 */
299 if (!timer_pending(&gp->link_timer))
300 return 1;
301 }
302
303 return 0;
304}
305
306static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
307{
308 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
309
310 if (netif_msg_intr(gp))
311 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
312 gp->dev->name, txmac_stat);
313
314 /* Defer-timer expiration is quite normal,
315 * don't even log the event.
316 */
317 if ((txmac_stat & MAC_TXSTAT_DTE) &&
318 !(txmac_stat & ~MAC_TXSTAT_DTE))
319 return 0;
320
321 if (txmac_stat & MAC_TXSTAT_URUN) {
322 printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
323 dev->name);
324 gp->net_stats.tx_fifo_errors++;
325 }
326
327 if (txmac_stat & MAC_TXSTAT_MPE) {
328 printk(KERN_ERR "%s: TX MAC max packet size error.\n",
329 dev->name);
330 gp->net_stats.tx_errors++;
331 }
332
333 /* The rest are all cases of one of the 16-bit TX
334 * counters expiring.
335 */
336 if (txmac_stat & MAC_TXSTAT_NCE)
337 gp->net_stats.collisions += 0x10000;
338
339 if (txmac_stat & MAC_TXSTAT_ECE) {
340 gp->net_stats.tx_aborted_errors += 0x10000;
341 gp->net_stats.collisions += 0x10000;
342 }
343
344 if (txmac_stat & MAC_TXSTAT_LCE) {
345 gp->net_stats.tx_aborted_errors += 0x10000;
346 gp->net_stats.collisions += 0x10000;
347 }
348
349 /* We do not keep track of MAC_TXSTAT_FCE and
350 * MAC_TXSTAT_PCE events.
351 */
352 return 0;
353}
354
355/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
356 * so we do the following.
357 *
358 * If any part of the reset goes wrong, we return 1 and that causes the
359 * whole chip to be reset.
360 */
361static int gem_rxmac_reset(struct gem *gp)
362{
363 struct net_device *dev = gp->dev;
364 int limit, i;
365 u64 desc_dma;
366 u32 val;
367
368 /* First, reset & disable MAC RX. */
369 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
370 for (limit = 0; limit < 5000; limit++) {
371 if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
372 break;
373 udelay(10);
374 }
375 if (limit == 5000) {
376 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
377 "chip.\n", dev->name);
378 return 1;
379 }
380
381 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
382 gp->regs + MAC_RXCFG);
383 for (limit = 0; limit < 5000; limit++) {
384 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
385 break;
386 udelay(10);
387 }
388 if (limit == 5000) {
389 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
390 "chip.\n", dev->name);
391 return 1;
392 }
393
394 /* Second, disable RX DMA. */
395 writel(0, gp->regs + RXDMA_CFG);
396 for (limit = 0; limit < 5000; limit++) {
397 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
398 break;
399 udelay(10);
400 }
401 if (limit == 5000) {
402 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
403 "chip.\n", dev->name);
404 return 1;
405 }
406
407 mdelay(5);
408
409 /* Execute RX reset command. */
410 writel(gp->swrst_base | GREG_SWRST_RXRST,
411 gp->regs + GREG_SWRST);
412 for (limit = 0; limit < 5000; limit++) {
413 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
414 break;
415 udelay(10);
416 }
417 if (limit == 5000) {
418 printk(KERN_ERR "%s: RX reset command will not execute, resetting "
419 "whole chip.\n", dev->name);
420 return 1;
421 }
422
423 /* Refresh the RX ring. */
424 for (i = 0; i < RX_RING_SIZE; i++) {
425 struct gem_rxd *rxd = &gp->init_block->rxd[i];
426
427 if (gp->rx_skbs[i] == NULL) {
428 printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
429 "whole chip.\n", dev->name);
430 return 1;
431 }
432
433 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
434 }
435 gp->rx_new = gp->rx_old = 0;
436
437 /* Now we must reprogram the rest of RX unit. */
438 desc_dma = (u64) gp->gblock_dvma;
439 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
440 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
441 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
442 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
443 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
444 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
445 writel(val, gp->regs + RXDMA_CFG);
446 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
447 writel(((5 & RXDMA_BLANK_IPKTS) |
448 ((8 << 12) & RXDMA_BLANK_ITIME)),
449 gp->regs + RXDMA_BLANK);
450 else
451 writel(((5 & RXDMA_BLANK_IPKTS) |
452 ((4 << 12) & RXDMA_BLANK_ITIME)),
453 gp->regs + RXDMA_BLANK);
454 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
455 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
456 writel(val, gp->regs + RXDMA_PTHRESH);
457 val = readl(gp->regs + RXDMA_CFG);
458 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
459 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
460 val = readl(gp->regs + MAC_RXCFG);
461 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
462
463 return 0;
464}
465
466static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
467{
468 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
469 int ret = 0;
470
471 if (netif_msg_intr(gp))
472 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
473 gp->dev->name, rxmac_stat);
474
475 if (rxmac_stat & MAC_RXSTAT_OFLW) {
476 u32 smac = readl(gp->regs + MAC_SMACHINE);
477
478 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
479 dev->name, smac);
480 gp->net_stats.rx_over_errors++;
481 gp->net_stats.rx_fifo_errors++;
482
483 ret = gem_rxmac_reset(gp);
484 }
485
486 if (rxmac_stat & MAC_RXSTAT_ACE)
487 gp->net_stats.rx_frame_errors += 0x10000;
488
489 if (rxmac_stat & MAC_RXSTAT_CCE)
490 gp->net_stats.rx_crc_errors += 0x10000;
491
492 if (rxmac_stat & MAC_RXSTAT_LCE)
493 gp->net_stats.rx_length_errors += 0x10000;
494
495 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
496 * events.
497 */
498 return ret;
499}
500
501static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
502{
503 u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
504
505 if (netif_msg_intr(gp))
506 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
507 gp->dev->name, mac_cstat);
508
509 /* This interrupt is just for pause frame and pause
510 * tracking. It is useful for diagnostics and debug
511 * but probably by default we will mask these events.
512 */
513 if (mac_cstat & MAC_CSTAT_PS)
514 gp->pause_entered++;
515
516 if (mac_cstat & MAC_CSTAT_PRCV)
517 gp->pause_last_time_recvd = (mac_cstat >> 16);
518
519 return 0;
520}
521
522static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
523{
524 u32 mif_status = readl(gp->regs + MIF_STATUS);
525 u32 reg_val, changed_bits;
526
527 reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
528 changed_bits = (mif_status & MIF_STATUS_STAT);
529
530 gem_handle_mif_event(gp, reg_val, changed_bits);
531
532 return 0;
533}
534
535static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
536{
537 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
538
539 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
540 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
541 printk(KERN_ERR "%s: PCI error [%04x] ",
542 dev->name, pci_estat);
543
544 if (pci_estat & GREG_PCIESTAT_BADACK)
545 printk("<No ACK64# during ABS64 cycle> ");
546 if (pci_estat & GREG_PCIESTAT_DTRTO)
547 printk("<Delayed transaction timeout> ");
548 if (pci_estat & GREG_PCIESTAT_OTHER)
549 printk("<other>");
550 printk("\n");
551 } else {
552 pci_estat |= GREG_PCIESTAT_OTHER;
553 printk(KERN_ERR "%s: PCI error\n", dev->name);
554 }
555
556 if (pci_estat & GREG_PCIESTAT_OTHER) {
557 u16 pci_cfg_stat;
558
559 /* Interrogate PCI config space for the
560 * true cause.
561 */
562 pci_read_config_word(gp->pdev, PCI_STATUS,
563 &pci_cfg_stat);
564 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
565 dev->name, pci_cfg_stat);
566 if (pci_cfg_stat & PCI_STATUS_PARITY)
567 printk(KERN_ERR "%s: PCI parity error detected.\n",
568 dev->name);
569 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
570 printk(KERN_ERR "%s: PCI target abort.\n",
571 dev->name);
572 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
573 printk(KERN_ERR "%s: PCI master acks target abort.\n",
574 dev->name);
575 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
576 printk(KERN_ERR "%s: PCI master abort.\n",
577 dev->name);
578 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
579 printk(KERN_ERR "%s: PCI system error SERR#.\n",
580 dev->name);
581 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
582 printk(KERN_ERR "%s: PCI parity error.\n",
583 dev->name);
584
585 /* Write the error bits back to clear them. */
586 pci_cfg_stat &= (PCI_STATUS_PARITY |
587 PCI_STATUS_SIG_TARGET_ABORT |
588 PCI_STATUS_REC_TARGET_ABORT |
589 PCI_STATUS_REC_MASTER_ABORT |
590 PCI_STATUS_SIG_SYSTEM_ERROR |
591 PCI_STATUS_DETECTED_PARITY);
592 pci_write_config_word(gp->pdev,
593 PCI_STATUS, pci_cfg_stat);
594 }
595
596 /* For all PCI errors, we should reset the chip. */
597 return 1;
598}
599
600/* All non-normal interrupt conditions get serviced here.
601 * Returns non-zero if we should just exit the interrupt
602 * handler right now (ie. if we reset the card which invalidates
603 * all of the other original irq status bits).
604 */
605static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
606{
607 if (gem_status & GREG_STAT_RXNOBUF) {
608 /* Frame arrived, no free RX buffers available. */
609 if (netif_msg_rx_err(gp))
610 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
611 gp->dev->name);
612 gp->net_stats.rx_dropped++;
613 }
614
615 if (gem_status & GREG_STAT_RXTAGERR) {
616 /* corrupt RX tag framing */
617 if (netif_msg_rx_err(gp))
618 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
619 gp->dev->name);
620 gp->net_stats.rx_errors++;
621
622 goto do_reset;
623 }
624
625 if (gem_status & GREG_STAT_PCS) {
626 if (gem_pcs_interrupt(dev, gp, gem_status))
627 goto do_reset;
628 }
629
630 if (gem_status & GREG_STAT_TXMAC) {
631 if (gem_txmac_interrupt(dev, gp, gem_status))
632 goto do_reset;
633 }
634
635 if (gem_status & GREG_STAT_RXMAC) {
636 if (gem_rxmac_interrupt(dev, gp, gem_status))
637 goto do_reset;
638 }
639
640 if (gem_status & GREG_STAT_MAC) {
641 if (gem_mac_interrupt(dev, gp, gem_status))
642 goto do_reset;
643 }
644
645 if (gem_status & GREG_STAT_MIF) {
646 if (gem_mif_interrupt(dev, gp, gem_status))
647 goto do_reset;
648 }
649
650 if (gem_status & GREG_STAT_PCIERR) {
651 if (gem_pci_interrupt(dev, gp, gem_status))
652 goto do_reset;
653 }
654
655 return 0;
656
657do_reset:
658 gp->reset_task_pending = 1;
659 schedule_work(&gp->reset_task);
660
661 return 1;
662}
663
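/* Reap completed TX descriptors. The chip reports the next descriptor it
 * will complete in the TXNR field of the status word; we walk from tx_old
 * up to that index, unmap each buffer, free an skb only once all of its
 * fragment descriptors have completed, and wake the queue when there is
 * room again.
 */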
664static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
665{
666 int entry, limit;
667
668 if (netif_msg_intr(gp))
669 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
670 gp->dev->name, gem_status);
671
672 entry = gp->tx_old;
673 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
674 while (entry != limit) {
675 struct sk_buff *skb;
676 struct gem_txd *txd;
677 dma_addr_t dma_addr;
678 u32 dma_len;
679 int frag;
680
681 if (netif_msg_tx_done(gp))
682 printk(KERN_DEBUG "%s: tx done, slot %d\n",
683 gp->dev->name, entry);
684 skb = gp->tx_skbs[entry];
685 if (skb_shinfo(skb)->nr_frags) {
686 int last = entry + skb_shinfo(skb)->nr_frags;
687 int walk = entry;
688 int incomplete = 0;
689
690 last &= (TX_RING_SIZE - 1);
691 for (;;) {
692 walk = NEXT_TX(walk);
693 if (walk == limit)
694 incomplete = 1;
695 if (walk == last)
696 break;
697 }
698 if (incomplete)
699 break;
700 }
701 gp->tx_skbs[entry] = NULL;
702 gp->net_stats.tx_bytes += skb->len;
703
704 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
705 txd = &gp->init_block->txd[entry];
706
707 dma_addr = le64_to_cpu(txd->buffer);
708 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
709
710 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
711 entry = NEXT_TX(entry);
712 }
713
714 gp->net_stats.tx_packets++;
715 dev_kfree_skb_irq(skb);
716 }
717 gp->tx_old = entry;
718
719 if (netif_queue_stopped(dev) &&
720 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
721 netif_wake_queue(dev);
722}
723
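/* Return RX descriptors to the chip in aligned clusters of 4: each full
 * cluster is marked FRESH (chip-owned) first, then RXDMA_KICK is advanced
 * to the last complete cluster. The barriers order the status-word
 * updates against the kick write.
 */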
724static __inline__ void gem_post_rxds(struct gem *gp, int limit)
725{
726 int cluster_start, curr, count, kick;
727
728 cluster_start = curr = (gp->rx_new & ~(4 - 1));
729 count = 0;
730 kick = -1;
731 wmb();
732 while (curr != limit) {
733 curr = NEXT_RX(curr);
734 if (++count == 4) {
735 struct gem_rxd *rxd =
736 &gp->init_block->rxd[cluster_start];
737 for (;;) {
738 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
739 rxd++;
740 cluster_start = NEXT_RX(cluster_start);
741 if (cluster_start == curr)
742 break;
743 }
744 kick = curr;
745 count = 0;
746 }
747 }
748 if (kick >= 0) {
749 mb();
750 writel(kick, gp->regs + RXDMA_KICK);
751 }
752}
753
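/* NAPI receive loop. Stops at the first descriptor the chip still owns,
 * at the work_to_do budget, or at the chip's RXDMA_DONE index. Small
 * frames (up to RX_COPY_THRESHOLD) are copied into a fresh skb so the
 * ring buffer can be reused; larger ones are passed up and their ring
 * slot is refilled with a newly allocated buffer.
 */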
754static int gem_rx(struct gem *gp, int work_to_do)
755{
756 int entry, drops, work_done = 0;
757 u32 done;
758
759 if (netif_msg_rx_status(gp))
760 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
761 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
762
763 entry = gp->rx_new;
764 drops = 0;
765 done = readl(gp->regs + RXDMA_DONE);
766 for (;;) {
767 struct gem_rxd *rxd = &gp->init_block->rxd[entry];
768 struct sk_buff *skb;
769 u64 status = le64_to_cpu(rxd->status_word);
770 dma_addr_t dma_addr;
771 int len;
772
773 if ((status & RXDCTRL_OWN) != 0)
774 break;
775
776 if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
777 break;
778
779 /* When writing back RX descriptor, GEM writes status
780 * then buffer address, possibly in separate transactions.
781 * If we don't wait for the chip to write both, we could
782 * post a new buffer to this descriptor then have GEM spam
783 * on the buffer address. We sync on the RX completion
784 * register to prevent this from happening.
785 */
786 if (entry == done) {
787 done = readl(gp->regs + RXDMA_DONE);
788 if (entry == done)
789 break;
790 }
791
792 /* We can now account for the work we're about to do */
793 work_done++;
794
795 skb = gp->rx_skbs[entry];
796
797 len = (status & RXDCTRL_BUFSZ) >> 16;
798 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
799 gp->net_stats.rx_errors++;
800 if (len < ETH_ZLEN)
801 gp->net_stats.rx_length_errors++;
802 if (status & RXDCTRL_BAD)
803 gp->net_stats.rx_crc_errors++;
804
805 /* We'll just return it to GEM. */
806 drop_it:
807 gp->net_stats.rx_dropped++;
808 goto next;
809 }
810
811 dma_addr = le64_to_cpu(rxd->buffer);
812 if (len > RX_COPY_THRESHOLD) {
813 struct sk_buff *new_skb;
814
815 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
816 if (new_skb == NULL) {
817 drops++;
818 goto drop_it;
819 }
820 pci_unmap_page(gp->pdev, dma_addr,
821 RX_BUF_ALLOC_SIZE(gp),
822 PCI_DMA_FROMDEVICE);
823 gp->rx_skbs[entry] = new_skb;
824 new_skb->dev = gp->dev;
825 skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
826 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
827 virt_to_page(new_skb->data),
828 offset_in_page(new_skb->data),
829 RX_BUF_ALLOC_SIZE(gp),
830 PCI_DMA_FROMDEVICE));
831 skb_reserve(new_skb, RX_OFFSET);
832
833 /* Trim the original skb for the netif. */
834 skb_trim(skb, len);
835 } else {
836 struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
837
838 if (copy_skb == NULL) {
839 drops++;
840 goto drop_it;
841 }
842
843 copy_skb->dev = gp->dev;
844 skb_reserve(copy_skb, 2);
845 skb_put(copy_skb, len);
846 pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
847 memcpy(copy_skb->data, skb->data, len);
848 pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
849
850 /* We'll reuse the original ring buffer. */
851 skb = copy_skb;
852 }
853
854 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
855 skb->ip_summed = CHECKSUM_HW;
856 skb->protocol = eth_type_trans(skb, gp->dev);
857
858 netif_receive_skb(skb);
859
860 gp->net_stats.rx_packets++;
861 gp->net_stats.rx_bytes += len;
862 gp->dev->last_rx = jiffies;
863
864 next:
865 entry = NEXT_RX(entry);
866 }
867
868 gem_post_rxds(gp, entry);
869
870 gp->rx_new = entry;
871
872 if (drops)
873 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
874 gp->dev->name);
875
876 return work_done;
877}
878
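/* NAPI poll callback: handle any abnormal interrupt causes, reap
 * completed TX entries, then receive up to min(*budget, dev->quota)
 * packets. Returns 1 while more work remains; once the status register
 * goes idle, interrupts are re-enabled and 0 is returned.
 */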
879static int gem_poll(struct net_device *dev, int *budget)
880{
881 struct gem *gp = dev->priv;
882 unsigned long flags;
883
884 /*
885 * NAPI locking nightmare: See comment at head of driver
886 */
887 spin_lock_irqsave(&gp->lock, flags);
888
889 do {
890 int work_to_do, work_done;
891
892 /* Handle anomalies */
893 if (gp->status & GREG_STAT_ABNORMAL) {
894 if (gem_abnormal_irq(dev, gp, gp->status))
895 break;
896 }
897
898 /* Run TX completion thread */
899 spin_lock(&gp->tx_lock);
900 gem_tx(dev, gp, gp->status);
901 spin_unlock(&gp->tx_lock);
902
903 spin_unlock_irqrestore(&gp->lock, flags);
904
905 /* Run RX thread. We don't use any locking here,
906 * code willing to do bad things - like cleaning the
907 * rx ring - must call netif_poll_disable(), which
908 * schedule_timeout()'s if polling is already disabled.
909 */
910 work_to_do = min(*budget, dev->quota);
911
912 work_done = gem_rx(gp, work_to_do);
913
914 *budget -= work_done;
915 dev->quota -= work_done;
916
917 if (work_done >= work_to_do)
918 return 1;
919
920 spin_lock_irqsave(&gp->lock, flags);
921
922 gp->status = readl(gp->regs + GREG_STAT);
923 } while (gp->status & GREG_STAT_NAPI);
924
925 __netif_rx_complete(dev);
926 gem_enable_ints(gp);
927
928 spin_unlock_irqrestore(&gp->lock, flags);
929 return 0;
930}
931
932static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
933{
934 struct net_device *dev = dev_id;
935 struct gem *gp = dev->priv;
936 unsigned long flags;
937
938 /* Swallow interrupts when shutting the chip down, though
939 * that shouldn't happen, we should have done free_irq() at
940 * this point...
941 */
942 if (!gp->running)
943 return IRQ_HANDLED;
944
945 spin_lock_irqsave(&gp->lock, flags);
946
947 if (netif_rx_schedule_prep(dev)) {
948 u32 gem_status = readl(gp->regs + GREG_STAT);
949
950 if (gem_status == 0) {
951 netif_poll_enable(dev);
952 spin_unlock_irqrestore(&gp->lock, flags);
953 return IRQ_NONE;
954 }
955 gp->status = gem_status;
956 gem_disable_ints(gp);
957 __netif_rx_schedule(dev);
958 }
959
960 spin_unlock_irqrestore(&gp->lock, flags);
961
962 /* If polling was disabled at the time we received that
963 * interrupt, we may return IRQ_HANDLED here while we
964 * should return IRQ_NONE. No big deal...
965 */
966 return IRQ_HANDLED;
967}
968
969#ifdef CONFIG_NET_POLL_CONTROLLER
970static void gem_poll_controller(struct net_device *dev)
971{
972 /* gem_interrupt() is safe to reenter, so there is no need
973 * to disable_irq() here.
974 */
975 gem_interrupt(dev->irq, dev, NULL);
976}
977#endif
978
979static void gem_tx_timeout(struct net_device *dev)
980{
981 struct gem *gp = dev->priv;
982
983 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
984 if (!gp->running) {
985 printk(KERN_ERR "%s: hrm.. hw not running !\n", dev->name);
986 return;
987 }
988 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
989 dev->name,
990 readl(gp->regs + TXDMA_CFG),
991 readl(gp->regs + MAC_TXSTAT),
992 readl(gp->regs + MAC_TXCFG));
993 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
994 dev->name,
995 readl(gp->regs + RXDMA_CFG),
996 readl(gp->regs + MAC_RXSTAT),
997 readl(gp->regs + MAC_RXCFG));
998
999 spin_lock_irq(&gp->lock);
1000 spin_lock(&gp->tx_lock);
1001
1002 gp->reset_task_pending = 1;
1003 schedule_work(&gp->reset_task);
1004
1005 spin_unlock(&gp->tx_lock);
1006 spin_unlock_irq(&gp->lock);
1007}
1008
1009static __inline__ int gem_intme(int entry)
1010{
1011 /* Algorithm: IRQ every 1/2 of descriptors. */
1012 if (!(entry & ((TX_RING_SIZE>>1)-1)))
1013 return 1;
1014
1015 return 0;
1016}
1017
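/* hard_start_xmit: build TX descriptors for the skb. For HW checksumming
 * the checksum start/stuff offsets are encoded into the control word.
 * The tx_lock is only trylock'd so we can return NETDEV_TX_LOCKED instead
 * of spinning; for fragmented skbs the first (SOF) descriptor is handed
 * to the chip last to avoid racing with the DMA engine.
 */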
1018static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1019{
1020 struct gem *gp = dev->priv;
1021 int entry;
1022 u64 ctrl;
1023 unsigned long flags;
1024
1025 ctrl = 0;
1026 if (skb->ip_summed == CHECKSUM_HW) {
1027 u64 csum_start_off, csum_stuff_off;
1028
1029 csum_start_off = (u64) (skb->h.raw - skb->data);
1030 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
1031
1032 ctrl = (TXDCTRL_CENAB |
1033 (csum_start_off << 15) |
1034 (csum_stuff_off << 21));
1035 }
1036
1037 local_irq_save(flags);
1038 if (!spin_trylock(&gp->tx_lock)) {
1039 /* Tell upper layer to requeue */
1040 local_irq_restore(flags);
1041 return NETDEV_TX_LOCKED;
1042 }
1043 /* We raced with gem_do_stop() */
1044 if (!gp->running) {
1045 spin_unlock_irqrestore(&gp->tx_lock, flags);
1046 return NETDEV_TX_BUSY;
1047 }
1048
1049 /* This is a hard error, log it. */
1050 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1051 netif_stop_queue(dev);
1052 spin_unlock_irqrestore(&gp->tx_lock, flags);
1053 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
1054 dev->name);
1055 return NETDEV_TX_BUSY;
1056 }
1057
1058 entry = gp->tx_new;
1059 gp->tx_skbs[entry] = skb;
1060
1061 if (skb_shinfo(skb)->nr_frags == 0) {
1062 struct gem_txd *txd = &gp->init_block->txd[entry];
1063 dma_addr_t mapping;
1064 u32 len;
1065
1066 len = skb->len;
1067 mapping = pci_map_page(gp->pdev,
1068 virt_to_page(skb->data),
1069 offset_in_page(skb->data),
1070 len, PCI_DMA_TODEVICE);
1071 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
1072 if (gem_intme(entry))
1073 ctrl |= TXDCTRL_INTME;
1074 txd->buffer = cpu_to_le64(mapping);
1075 wmb();
1076 txd->control_word = cpu_to_le64(ctrl);
1077 entry = NEXT_TX(entry);
1078 } else {
1079 struct gem_txd *txd;
1080 u32 first_len;
1081 u64 intme;
1082 dma_addr_t first_mapping;
1083 int frag, first_entry = entry;
1084
1085 intme = 0;
1086 if (gem_intme(entry))
1087 intme |= TXDCTRL_INTME;
1088
1089 /* We must give this initial chunk to the device last.
1090 * Otherwise we could race with the device.
1091 */
1092 first_len = skb_headlen(skb);
1093 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
1094 offset_in_page(skb->data),
1095 first_len, PCI_DMA_TODEVICE);
1096 entry = NEXT_TX(entry);
1097
1098 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1099 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1100 u32 len;
1101 dma_addr_t mapping;
1102 u64 this_ctrl;
1103
1104 len = this_frag->size;
1105 mapping = pci_map_page(gp->pdev,
1106 this_frag->page,
1107 this_frag->page_offset,
1108 len, PCI_DMA_TODEVICE);
1109 this_ctrl = ctrl;
1110 if (frag == skb_shinfo(skb)->nr_frags - 1)
1111 this_ctrl |= TXDCTRL_EOF;
1112
1113 txd = &gp->init_block->txd[entry];
1114 txd->buffer = cpu_to_le64(mapping);
1115 wmb();
1116 txd->control_word = cpu_to_le64(this_ctrl | len);
1117
1118 if (gem_intme(entry))
1119 intme |= TXDCTRL_INTME;
1120
1121 entry = NEXT_TX(entry);
1122 }
1123 txd = &gp->init_block->txd[first_entry];
1124 txd->buffer = cpu_to_le64(first_mapping);
1125 wmb();
1126 txd->control_word =
1127 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
1128 }
1129
1130 gp->tx_new = entry;
1131 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
1132 netif_stop_queue(dev);
1133
1134 if (netif_msg_tx_queued(gp))
1135 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
1136 dev->name, entry, skb->len);
1137 mb();
1138 writel(gp->tx_new, gp->regs + TXDMA_KICK);
1139 spin_unlock_irqrestore(&gp->tx_lock, flags);
1140
1141 dev->trans_start = jiffies;
1142
1143 return NETDEV_TX_OK;
1144}
1145
1146#define STOP_TRIES 32
1147
1148/* Must be invoked under gp->lock and gp->tx_lock. */
1149static void gem_reset(struct gem *gp)
1150{
1151 int limit;
1152 u32 val;
1153
1154 /* Make sure we won't get any more interrupts */
1155 writel(0xffffffff, gp->regs + GREG_IMASK);
1156
1157 /* Reset the chip */
1158 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
1159 gp->regs + GREG_SWRST);
1160
1161 limit = STOP_TRIES;
1162
1163 do {
1164 udelay(20);
1165 val = readl(gp->regs + GREG_SWRST);
1166 if (limit-- <= 0)
1167 break;
1168 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
1169
1170 if (limit <= 0)
1171 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
1172}
1173
1174/* Must be invoked under gp->lock and gp->tx_lock. */
1175static void gem_start_dma(struct gem *gp)
1176{
1177 u32 val;
1178
1179 /* We are ready to rock, turn everything on. */
1180 val = readl(gp->regs + TXDMA_CFG);
1181 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1182 val = readl(gp->regs + RXDMA_CFG);
1183 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1184 val = readl(gp->regs + MAC_TXCFG);
1185 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1186 val = readl(gp->regs + MAC_RXCFG);
1187 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1188
1189 (void) readl(gp->regs + MAC_RXCFG);
1190 udelay(100);
1191
1192 gem_enable_ints(gp);
1193
1194 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1195}
1196
1197/* Must be invoked under gp->lock and gp->tx_lock. DMA won't actually
1198 * be stopped until about 4 ms later though ...
1199 */
1200static void gem_stop_dma(struct gem *gp)
1201{
1202 u32 val;
1203
1204 /* We are done rocking, turn everything off. */
1205 val = readl(gp->regs + TXDMA_CFG);
1206 writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1207 val = readl(gp->regs + RXDMA_CFG);
1208 writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1209 val = readl(gp->regs + MAC_TXCFG);
1210 writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1211 val = readl(gp->regs + MAC_RXCFG);
1212 writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
1213
1214 (void) readl(gp->regs + MAC_RXCFG);
1215
1216 /* Need to wait a bit ... done by the caller */
1217}
1218
1219
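/* Start (or restart) link negotiation. The requested settings from
 * ethtool, if any, are sanitized against the PHY's capabilities; if the
 * chip is asleep we only record them. The link timer is (re)armed for
 * 1.2 s to poll the outcome.
 */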
1220/* Must be invoked under gp->lock and gp->tx_lock. */
1221// XXX dbl check what that function should do when called on PCS PHY
1222static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1223{
1224 u32 advertise, features;
1225 int autoneg;
1226 int speed;
1227 int duplex;
1228
1229 if (gp->phy_type != phy_mii_mdio0 &&
1230 gp->phy_type != phy_mii_mdio1)
1231 goto non_mii;
1232
1233 /* Setup advertise */
1234 if (found_mii_phy(gp))
1235 features = gp->phy_mii.def->features;
1236 else
1237 features = 0;
1238
1239 advertise = features & ADVERTISE_MASK;
1240 if (gp->phy_mii.advertising != 0)
1241 advertise &= gp->phy_mii.advertising;
1242
1243 autoneg = gp->want_autoneg;
1244 speed = gp->phy_mii.speed;
1245 duplex = gp->phy_mii.duplex;
1246
1247 /* Setup link parameters */
1248 if (!ep)
1249 goto start_aneg;
1250 if (ep->autoneg == AUTONEG_ENABLE) {
1251 advertise = ep->advertising;
1252 autoneg = 1;
1253 } else {
1254 autoneg = 0;
1255 speed = ep->speed;
1256 duplex = ep->duplex;
1257 }
1258
1259start_aneg:
1260 /* Sanitize settings based on PHY capabilities */
1261 if ((features & SUPPORTED_Autoneg) == 0)
1262 autoneg = 0;
1263 if (speed == SPEED_1000 &&
1264 !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
1265 speed = SPEED_100;
1266 if (speed == SPEED_100 &&
1267 !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
1268 speed = SPEED_10;
1269 if (duplex == DUPLEX_FULL &&
1270 !(features & (SUPPORTED_1000baseT_Full |
1271 SUPPORTED_100baseT_Full |
1272 SUPPORTED_10baseT_Full)))
1273 duplex = DUPLEX_HALF;
1274 if (speed == 0)
1275 speed = SPEED_10;
1276
1277 /* If we are asleep, we don't try to actually setup the PHY, we
1278 * just store the settings
1279 */
1280 if (gp->asleep) {
1281 gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
1282 gp->phy_mii.speed = speed;
1283 gp->phy_mii.duplex = duplex;
1284 return;
1285 }
1286
1287 /* Configure PHY & start aneg */
1288 gp->want_autoneg = autoneg;
1289 if (autoneg) {
1290 if (found_mii_phy(gp))
1291 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
1292 gp->lstate = link_aneg;
1293 } else {
1294 if (found_mii_phy(gp))
1295 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
1296 gp->lstate = link_force_ok;
1297 }
1298
1299non_mii:
1300 gp->timer_ticks = 0;
1301 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1302}
1303
1304/* A link-up condition has occurred, initialize and enable the
1305 * rest of the chip.
1306 *
1307 * Must be invoked under gp->lock and gp->tx_lock.
1308 */
1309static int gem_set_link_modes(struct gem *gp)
1310{
1311 u32 val;
1312 int full_duplex, speed, pause;
1313
1314 full_duplex = 0;
1315 speed = SPEED_10;
1316 pause = 0;
1317
1318 if (found_mii_phy(gp)) {
1319 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
1320 return 1;
1321 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
1322 speed = gp->phy_mii.speed;
1323 pause = gp->phy_mii.pause;
1324 } else if (gp->phy_type == phy_serialink ||
1325 gp->phy_type == phy_serdes) {
1326 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1327
1328 if (pcs_lpa & PCS_MIIADV_FD)
1329 full_duplex = 1;
1330 speed = SPEED_1000;
1331 }
1332
1333 if (netif_msg_link(gp))
1334 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
1335 gp->dev->name, speed, (full_duplex ? "full" : "half"));
1336
1337 if (!gp->running)
1338 return 0;
1339
1340 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
1341 if (full_duplex) {
1342 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
1343 } else {
1344 /* MAC_TXCFG_NBO must be zero. */
1345 }
1346 writel(val, gp->regs + MAC_TXCFG);
1347
1348 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
1349 if (!full_duplex &&
1350 (gp->phy_type == phy_mii_mdio0 ||
1351 gp->phy_type == phy_mii_mdio1)) {
1352 val |= MAC_XIFCFG_DISE;
1353 } else if (full_duplex) {
1354 val |= MAC_XIFCFG_FLED;
1355 }
1356
1357 if (speed == SPEED_1000)
1358 val |= (MAC_XIFCFG_GMII);
1359
1360 writel(val, gp->regs + MAC_XIFCFG);
1361
1362 /* If gigabit and half-duplex, enable carrier extension
1363 * mode. Else, disable it.
1364 */
1365 if (speed == SPEED_1000 && !full_duplex) {
1366 val = readl(gp->regs + MAC_TXCFG);
1367 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1368
1369 val = readl(gp->regs + MAC_RXCFG);
1370 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1371 } else {
1372 val = readl(gp->regs + MAC_TXCFG);
1373 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1374
1375 val = readl(gp->regs + MAC_RXCFG);
1376 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1377 }
1378
1379 if (gp->phy_type == phy_serialink ||
1380 gp->phy_type == phy_serdes) {
1381 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1382
1383 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
1384 pause = 1;
1385 }
1386
1387 if (netif_msg_link(gp)) {
1388 if (pause) {
1389 printk(KERN_INFO "%s: Pause is enabled "
1390 "(rxfifo: %d off: %d on: %d)\n",
1391 gp->dev->name,
1392 gp->rx_fifo_sz,
1393 gp->rx_pause_off,
1394 gp->rx_pause_on);
1395 } else {
1396 printk(KERN_INFO "%s: Pause is disabled\n",
1397 gp->dev->name);
1398 }
1399 }
1400
1401 if (!full_duplex)
1402 writel(512, gp->regs + MAC_STIME);
1403 else
1404 writel(64, gp->regs + MAC_STIME);
1405 val = readl(gp->regs + MAC_MCCFG);
1406 if (pause)
1407 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1408 else
1409 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1410 writel(val, gp->regs + MAC_MCCFG);
1411
1412 gem_start_dma(gp);
1413
1414 return 0;
1415}
1416
1417/* Must be invoked under gp->lock and gp->tx_lock. */
1418static int gem_mdio_link_not_up(struct gem *gp)
1419{
1420 switch (gp->lstate) {
1421 case link_force_ret:
1422 if (netif_msg_link(gp))
1423 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1424 " forced mode\n", gp->dev->name);
1425 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
1426 gp->last_forced_speed, DUPLEX_HALF);
1427 gp->timer_ticks = 5;
1428 gp->lstate = link_force_ok;
1429 return 0;
1430 case link_aneg:
1431 /* We try forced modes after a failed aneg only on PHYs that don't
1432 * have the "magic_aneg" bit set, which means they internally do the
1433 * whole forced-mode thingy themselves. On these, we just restart aneg.
1434 */
1435 if (gp->phy_mii.def->magic_aneg)
1436 return 1;
1437 if (netif_msg_link(gp))
1438 printk(KERN_INFO "%s: switching to forced 100bt\n",
1439 gp->dev->name);
1440 /* Try forced modes. */
1441 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
1442 DUPLEX_HALF);
1443 gp->timer_ticks = 5;
1444 gp->lstate = link_force_try;
1445 return 0;
1446 case link_force_try:
1447 /* Downgrade from 100 to 10 Mbps if necessary.
1448 * If already at 10Mbps, warn user about the
1449 * situation every 10 ticks.
1450 */
1451 if (gp->phy_mii.speed == SPEED_100) {
1452 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
1453 DUPLEX_HALF);
1454 gp->timer_ticks = 5;
1455 if (netif_msg_link(gp))
1456 printk(KERN_INFO "%s: switching to forced 10bt\n",
1457 gp->dev->name);
1458 return 0;
1459 } else
1460 return 1;
1461 default:
1462 return 0;
1463 }
1464}
1465
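/* Link poll state machine, run every 1.2 s. Checks PCS or MII link
 * state, retries a short autoneg pass after a successful forced-mode
 * fallback, and otherwise downgrades 100 -> 10 Mbps (see
 * gem_mdio_link_not_up) when no link shows up after ~10 ticks.
 */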
1466static void gem_link_timer(unsigned long data)
1467{
1468 struct gem *gp = (struct gem *) data;
1469 int restart_aneg = 0;
1470
1471 if (gp->asleep)
1472 return;
1473
1474 spin_lock_irq(&gp->lock);
1475 spin_lock(&gp->tx_lock);
1476 gem_get_cell(gp);
1477
1478 /* If the reset task is still pending, we just
1479 * reschedule the link timer
1480 */
1481 if (gp->reset_task_pending)
1482 goto restart;
1483
1484 if (gp->phy_type == phy_serialink ||
1485 gp->phy_type == phy_serdes) {
1486 u32 val = readl(gp->regs + PCS_MIISTAT);
1487
1488 if (!(val & PCS_MIISTAT_LS))
1489 val = readl(gp->regs + PCS_MIISTAT);
1490
1491 if ((val & PCS_MIISTAT_LS) != 0) {
1492 gp->lstate = link_up;
1493 netif_carrier_on(gp->dev);
1494 (void)gem_set_link_modes(gp);
1495 }
1496 goto restart;
1497 }
1498 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
1499 /* Ok, here we got a link. If we had it due to a forced
1500 * fallback, and we were configured for autoneg, we do
1501 * retry a short autoneg pass. If you know your hub is
1502 * broken, use ethtool ;)
1503 */
1504 if (gp->lstate == link_force_try && gp->want_autoneg) {
1505 gp->lstate = link_force_ret;
1506 gp->last_forced_speed = gp->phy_mii.speed;
1507 gp->timer_ticks = 5;
1508 if (netif_msg_link(gp))
1509 printk(KERN_INFO "%s: Got link after fallback, retrying"
1510 " autoneg once...\n", gp->dev->name);
1511 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
1512 } else if (gp->lstate != link_up) {
1513 gp->lstate = link_up;
1514 netif_carrier_on(gp->dev);
1515 if (gem_set_link_modes(gp))
1516 restart_aneg = 1;
1517 }
1518 } else {
1519 /* If the link was previously up, we restart the
1520 * whole process
1521 */
1522 if (gp->lstate == link_up) {
1523 gp->lstate = link_down;
1524 if (netif_msg_link(gp))
1525 printk(KERN_INFO "%s: Link down\n",
1526 gp->dev->name);
1527 netif_carrier_off(gp->dev);
1528 gp->reset_task_pending = 1;
1529 schedule_work(&gp->reset_task);
1530 restart_aneg = 1;
1531 } else if (++gp->timer_ticks > 10) {
1532 if (found_mii_phy(gp))
1533 restart_aneg = gem_mdio_link_not_up(gp);
1534 else
1535 restart_aneg = 1;
1536 }
1537 }
1538 if (restart_aneg) {
1539 gem_begin_auto_negotiation(gp, NULL);
1540 goto out_unlock;
1541 }
1542restart:
1543 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1544out_unlock:
1545 gem_put_cell(gp);
1546 spin_unlock(&gp->tx_lock);
1547 spin_unlock_irq(&gp->lock);
1548}
1549
1550/* Must be invoked under gp->lock and gp->tx_lock. */
1551static void gem_clean_rings(struct gem *gp)
1552{
1553 struct gem_init_block *gb = gp->init_block;
1554 struct sk_buff *skb;
1555 int i;
1556 dma_addr_t dma_addr;
1557
1558 for (i = 0; i < RX_RING_SIZE; i++) {
1559 struct gem_rxd *rxd;
1560
1561 rxd = &gb->rxd[i];
1562 if (gp->rx_skbs[i] != NULL) {
1563 skb = gp->rx_skbs[i];
1564 dma_addr = le64_to_cpu(rxd->buffer);
1565 pci_unmap_page(gp->pdev, dma_addr,
1566 RX_BUF_ALLOC_SIZE(gp),
1567 PCI_DMA_FROMDEVICE);
1568 dev_kfree_skb_any(skb);
1569 gp->rx_skbs[i] = NULL;
1570 }
1571 rxd->status_word = 0;
1572 wmb();
1573 rxd->buffer = 0;
1574 }
1575
1576 for (i = 0; i < TX_RING_SIZE; i++) {
1577 if (gp->tx_skbs[i] != NULL) {
1578 struct gem_txd *txd;
1579 int frag;
1580
1581 skb = gp->tx_skbs[i];
1582 gp->tx_skbs[i] = NULL;
1583
1584 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1585 int ent = i & (TX_RING_SIZE - 1);
1586
1587 txd = &gb->txd[ent];
1588 dma_addr = le64_to_cpu(txd->buffer);
1589 pci_unmap_page(gp->pdev, dma_addr,
1590 le64_to_cpu(txd->control_word) &
1591 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
1592
1593 if (frag != skb_shinfo(skb)->nr_frags)
1594 i++;
1595 }
1596 dev_kfree_skb_any(skb);
1597 }
1598 }
1599}
1600
1601/* Must be invoked under gp->lock and gp->tx_lock. */
1602static void gem_init_rings(struct gem *gp)
1603{
1604 struct gem_init_block *gb = gp->init_block;
1605 struct net_device *dev = gp->dev;
1606 int i;
1607 dma_addr_t dma_addr;
1608
1609 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
1610
1611 gem_clean_rings(gp);
1612
1613 gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
1614 (unsigned)VLAN_ETH_FRAME_LEN);
1615
1616 for (i = 0; i < RX_RING_SIZE; i++) {
1617 struct sk_buff *skb;
1618 struct gem_rxd *rxd = &gb->rxd[i];
1619
1620 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
1621 if (!skb) {
1622 rxd->buffer = 0;
1623 rxd->status_word = 0;
1624 continue;
1625 }
1626
1627 gp->rx_skbs[i] = skb;
1628 skb->dev = dev;
1629 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
1630 dma_addr = pci_map_page(gp->pdev,
1631 virt_to_page(skb->data),
1632 offset_in_page(skb->data),
1633 RX_BUF_ALLOC_SIZE(gp),
1634 PCI_DMA_FROMDEVICE);
1635 rxd->buffer = cpu_to_le64(dma_addr);
1636 wmb();
1637 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
1638 skb_reserve(skb, RX_OFFSET);
1639 }
1640
1641 for (i = 0; i < TX_RING_SIZE; i++) {
1642 struct gem_txd *txd = &gb->txd[i];
1643
1644 txd->control_word = 0;
1645 wmb();
1646 txd->buffer = 0;
1647 }
1648 wmb();
1649}
1650
1651/* Init PHY interface and start link poll state machine */
1652static void gem_init_phy(struct gem *gp)
1653{
1654 u32 mifcfg;
1655
1656 /* Revert MIF CFG setting done on stop_phy */
1657 mifcfg = readl(gp->regs + MIF_CFG);
1658 mifcfg &= ~MIF_CFG_BBMODE;
1659 writel(mifcfg, gp->regs + MIF_CFG);
1660
1661 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
1662 int i;
1663
1664 /* Those delays suck, but the HW seems to love them, so I'll
1665 * seriously consider breaking some locks here to be able
1666 * to schedule instead.
1667 */
1668 for (i = 0; i < 3; i++) {
1669#ifdef CONFIG_PPC_PMAC
1670 pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
1671 msleep(20);
1672#endif
1673 /* Some PHYs used by Apple have a problem getting back to us,
1674 * so we do an additional reset here.
1675 */
1676 phy_write(gp, MII_BMCR, BMCR_RESET);
1677 msleep(20);
1678 if (phy_read(gp, MII_BMCR) != 0xffff)
1679 break;
1680 if (i == 2)
1681 printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
1682 gp->dev->name);
1683 }
1684 }
1685
1686 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
1687 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1688 u32 val;
1689
1690 /* Init datapath mode register. */
1691 if (gp->phy_type == phy_mii_mdio0 ||
1692 gp->phy_type == phy_mii_mdio1) {
1693 val = PCS_DMODE_MGM;
1694 } else if (gp->phy_type == phy_serialink) {
1695 val = PCS_DMODE_SM | PCS_DMODE_GMOE;
1696 } else {
1697 val = PCS_DMODE_ESM;
1698 }
1699
1700 writel(val, gp->regs + PCS_DMODE);
1701 }
1702
1703 if (gp->phy_type == phy_mii_mdio0 ||
1704 gp->phy_type == phy_mii_mdio1) {
1705 // XXX check for errors
1706 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
1707
1708 /* Init PHY */
1709 if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1710 gp->phy_mii.def->ops->init(&gp->phy_mii);
1711 } else {
1712 u32 val;
1713 int limit;
1714
1715 /* Reset PCS unit. */
1716 val = readl(gp->regs + PCS_MIICTRL);
1717 val |= PCS_MIICTRL_RST;
1718 writel(val, gp->regs + PCS_MIICTRL);
1719
1720 limit = 32;
1721 while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1722 udelay(100);
1723 if (limit-- <= 0)
1724 break;
1725 }
1726 if (limit <= 0)
1727 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1728 gp->dev->name);
1729
1730 /* Make sure PCS is disabled while changing advertisement
1731 * configuration.
1732 */
1733 val = readl(gp->regs + PCS_CFG);
1734 val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1735 writel(val, gp->regs + PCS_CFG);
1736
1737 /* Advertise all capabilities except asymmetric
1738 * pause.
1739 */
1740 val = readl(gp->regs + PCS_MIIADV);
1741 val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1742 PCS_MIIADV_SP | PCS_MIIADV_AP);
1743 writel(val, gp->regs + PCS_MIIADV);
1744
1745 /* Enable and restart auto-negotiation, disable wrapback/loopback,
1746 * and re-enable PCS.
1747 */
1748 val = readl(gp->regs + PCS_MIICTRL);
1749 val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1750 val &= ~PCS_MIICTRL_WB;
1751 writel(val, gp->regs + PCS_MIICTRL);
1752
1753 val = readl(gp->regs + PCS_CFG);
1754 val |= PCS_CFG_ENABLE;
1755 writel(val, gp->regs + PCS_CFG);
1756
1757 /* Make sure serialink loopback is off. The meaning
1758 * of this bit is logically inverted based upon whether
1759 * you are in Serialink or SERDES mode.
1760 */
1761 val = readl(gp->regs + PCS_SCTRL);
1762 if (gp->phy_type == phy_serialink)
1763 val &= ~PCS_SCTRL_LOOP;
1764 else
1765 val |= PCS_SCTRL_LOOP;
1766 writel(val, gp->regs + PCS_SCTRL);
1767 }
1768
1769 /* Default aneg parameters */
1770 gp->timer_ticks = 0;
1771 gp->lstate = link_down;
1772 netif_carrier_off(gp->dev);
1773
1774 /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
1775 spin_lock_irq(&gp->lock);
1776 gem_begin_auto_negotiation(gp, NULL);
1777 spin_unlock_irq(&gp->lock);
1778}
1779
1780/* Must be invoked under gp->lock and gp->tx_lock. */
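/* Program the TX and RX descriptor ring base addresses (both rings live
 * back to back in the single init block), the RX kick/pause thresholds,
 * and interrupt blanking; the blanking time is doubled when the PCI bus
 * runs at 66 MHz.
 */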
1781static void gem_init_dma(struct gem *gp)
1782{
1783 u64 desc_dma = (u64) gp->gblock_dvma;
1784 u32 val;
1785
1786 val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
1787 writel(val, gp->regs + TXDMA_CFG);
1788
1789 writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
1790 writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
1791 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
1792
1793 writel(0, gp->regs + TXDMA_KICK);
1794
1795 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1796 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
1797 writel(val, gp->regs + RXDMA_CFG);
1798
1799 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
1800 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
1801
1802 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1803
1804 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
1805 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
1806 writel(val, gp->regs + RXDMA_PTHRESH);
1807
1808 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
1809 writel(((5 & RXDMA_BLANK_IPKTS) |
1810 ((8 << 12) & RXDMA_BLANK_ITIME)),
1811 gp->regs + RXDMA_BLANK);
1812 else
1813 writel(((5 & RXDMA_BLANK_IPKTS) |
1814 ((4 << 12) & RXDMA_BLANK_ITIME)),
1815 gp->regs + RXDMA_BLANK);
1816}
1817
1818/* Must be invoked under gp->lock and gp->tx_lock. */
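/* Compute the RX MAC filter configuration: promiscuous mode, an all-ones
 * hash (for IFF_ALLMULTI or more than 256 addresses), or a 256-bit hash
 * filter indexed by the top 8 bits of the little-endian CRC of each
 * multicast address, spread across 16 16-bit HASH registers.
 */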
1819static u32 gem_setup_multicast(struct gem *gp)
1820{
1821 u32 rxcfg = 0;
1822 int i;
1823
1824 if ((gp->dev->flags & IFF_ALLMULTI) ||
1825 (gp->dev->mc_count > 256)) {
1826 for (i=0; i<16; i++)
1827 writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1828 rxcfg |= MAC_RXCFG_HFE;
1829 } else if (gp->dev->flags & IFF_PROMISC) {
1830 rxcfg |= MAC_RXCFG_PROM;
1831 } else {
1832 u16 hash_table[16];
1833 u32 crc;
1834 struct dev_mc_list *dmi = gp->dev->mc_list;
1835 int i;
1836
1837 for (i = 0; i < 16; i++)
1838 hash_table[i] = 0;
1839
1840 for (i = 0; i < gp->dev->mc_count; i++) {
1841 char *addrs = dmi->dmi_addr;
1842
1843 dmi = dmi->next;
1844
1845 if (!(*addrs & 1))
1846 continue;
1847
1848 crc = ether_crc_le(6, addrs);
1849 crc >>= 24;
1850 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
1851 }
1852 for (i=0; i<16; i++)
1853 writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1854 rxcfg |= MAC_RXCFG_HFE;
1855 }
1856
1857 return rxcfg;
1858}
1859
1860/* Must be invoked under gp->lock and gp->tx_lock. */
1861static void gem_init_mac(struct gem *gp)
1862{
1863 unsigned char *e = &gp->dev->dev_addr[0];
1864
1865 writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
1866
1867 writel(0x00, gp->regs + MAC_IPG0);
1868 writel(0x08, gp->regs + MAC_IPG1);
1869 writel(0x04, gp->regs + MAC_IPG2);
1870 writel(0x40, gp->regs + MAC_STIME);
1871 writel(0x40, gp->regs + MAC_MINFSZ);
1872
1873 /* Ethernet payload + header + FCS + optional VLAN tag. */
1874 writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
1875
1876 writel(0x07, gp->regs + MAC_PASIZE);
1877 writel(0x04, gp->regs + MAC_JAMSIZE);
1878 writel(0x10, gp->regs + MAC_ATTLIM);
1879 writel(0x8808, gp->regs + MAC_MCTYPE);
1880
1881 writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
1882
1883 writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
1884 writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
1885 writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
1886
1887 writel(0, gp->regs + MAC_ADDR3);
1888 writel(0, gp->regs + MAC_ADDR4);
1889 writel(0, gp->regs + MAC_ADDR5);
1890
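/* 01:80:c2:00:00:01, the reserved multicast address that MAC control
 * (PAUSE) frames are addressed to.
 */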
1891 writel(0x0001, gp->regs + MAC_ADDR6);
1892 writel(0xc200, gp->regs + MAC_ADDR7);
1893 writel(0x0180, gp->regs + MAC_ADDR8);
1894
1895 writel(0, gp->regs + MAC_AFILT0);
1896 writel(0, gp->regs + MAC_AFILT1);
1897 writel(0, gp->regs + MAC_AFILT2);
1898 writel(0, gp->regs + MAC_AF21MSK);
1899 writel(0, gp->regs + MAC_AF0MSK);
1900
1901 gp->mac_rx_cfg = gem_setup_multicast(gp);
1902#ifdef STRIP_FCS
1903 gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
1904#endif
1905 writel(0, gp->regs + MAC_NCOLL);
1906 writel(0, gp->regs + MAC_FASUCC);
1907 writel(0, gp->regs + MAC_ECOLL);
1908 writel(0, gp->regs + MAC_LCOLL);
1909 writel(0, gp->regs + MAC_DTIMER);
1910 writel(0, gp->regs + MAC_PATMPS);
1911 writel(0, gp->regs + MAC_RFCTR);
1912 writel(0, gp->regs + MAC_LERR);
1913 writel(0, gp->regs + MAC_AERR);
1914 writel(0, gp->regs + MAC_FCSERR);
1915 writel(0, gp->regs + MAC_RXCVERR);
1916
1917 /* Clear RX/TX/MAC/XIF config, we will set these up and enable
1918 * them once a link is established.
1919 */
1920 writel(0, gp->regs + MAC_TXCFG);
1921 writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
1922 writel(0, gp->regs + MAC_MCCFG);
1923 writel(0, gp->regs + MAC_XIFCFG);
1924
1925 /* Setup MAC interrupts. We want to get all of the interesting
1926 * counter expiration events, but we do not want to hear about
1927 * normal rx/tx as the DMA engine tells us that.
1928 */
1929 writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
1930 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
1931
1932 /* Don't enable even the PAUSE interrupts for now, we
1933 * make no use of those events other than to record them.
1934 */
1935 writel(0xffffffff, gp->regs + MAC_MCMASK);
1936
1937 /* Don't enable GEM's WOL in normal operations
1938 */
1939 if (gp->has_wol)
1940 writel(0, gp->regs + WOL_WAKECSR);
1941}
1942
1943/* Must be invoked under gp->lock and gp->tx_lock. */
1944static void gem_init_pause_thresholds(struct gem *gp)
1945{
1946 u32 cfg;
1947
1948 /* Calculate pause thresholds. Setting the OFF threshold to the
1949 * full RX fifo size effectively disables PAUSE generation which
1950 * is what we do for 10/100 only GEMs which have FIFOs too small
1951 * to make real gains from PAUSE.
1952 */
1953 if (gp->rx_fifo_sz <= (2 * 1024)) {
1954 gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
1955 } else {
1956 int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
1957 int off = (gp->rx_fifo_sz - (max_frame * 2));
1958 int on = off - max_frame;
1959
1960 gp->rx_pause_off = off;
1961 gp->rx_pause_on = on;
1962 }
1963
1964
1965 /* Configure the chip "burst" DMA mode & enable some
1966 * HW bug fixes on Apple version
1967 */
1968 cfg = 0;
1969 if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
1970 cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
1971#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
1972 cfg |= GREG_CFG_IBURST;
1973#endif
1974 cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
1975 cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
1976 writel(cfg, gp->regs + GREG_CFG);
1977
1978 /* If Infinite Burst didn't stick, then use different
1979 * thresholds (and Apple bug fixes don't exist)
1980 */
1981 if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
1982 cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
1983 cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
1984 writel(cfg, gp->regs + GREG_CFG);
1985 }
1986}
1987
1988static int gem_check_invariants(struct gem *gp)
1989{
1990 struct pci_dev *pdev = gp->pdev;
1991 u32 mif_cfg;
1992
 1993 /* On Apple's sungem, we can't rely on registers as the chip
 1994 * may have been powered down by the firmware. The PHY is
 1995 * looked up later on.
 1996 */
1997 if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
1998 gp->phy_type = phy_mii_mdio0;
1999 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2000 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2001 gp->swrst_base = 0;
2002
2003 mif_cfg = readl(gp->regs + MIF_CFG);
2004 mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
2005 mif_cfg |= MIF_CFG_MDI0;
2006 writel(mif_cfg, gp->regs + MIF_CFG);
2007 writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
2008 writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
2009
 2010 /* We hard-code the PHY address so we can properly bring it out
 2011 * of reset later on; we can't really probe it at this point,
 2012 * though that isn't an issue.
 2013 */
2014 if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
2015 gp->mii_phy_addr = 1;
2016 else
2017 gp->mii_phy_addr = 0;
2018
2019 return 0;
2020 }
2021
2022 mif_cfg = readl(gp->regs + MIF_CFG);
2023
2024 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2025 pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
2026 /* One of the MII PHYs _must_ be present
2027 * as this chip has no gigabit PHY.
2028 */
2029 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
2030 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
2031 mif_cfg);
2032 return -1;
2033 }
2034 }
2035
 2036 /* Make an initial guess at the PHY interface type. MDIO1 is
 2037 * the external PHY and thus takes precedence over MDIO0.
 2038 */
2039
2040 if (mif_cfg & MIF_CFG_MDI1) {
2041 gp->phy_type = phy_mii_mdio1;
2042 mif_cfg |= MIF_CFG_PSELECT;
2043 writel(mif_cfg, gp->regs + MIF_CFG);
2044 } else if (mif_cfg & MIF_CFG_MDI0) {
2045 gp->phy_type = phy_mii_mdio0;
2046 mif_cfg &= ~MIF_CFG_PSELECT;
2047 writel(mif_cfg, gp->regs + MIF_CFG);
2048 } else {
2049 gp->phy_type = phy_serialink;
2050 }
2051 if (gp->phy_type == phy_mii_mdio1 ||
2052 gp->phy_type == phy_mii_mdio0) {
2053 int i;
2054
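		/* A vacant MDIO address reads back as all ones, so take
		 * the first address whose BMCR is not 0xffff as the PHY.
		 */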
2055 for (i = 0; i < 32; i++) {
2056 gp->mii_phy_addr = i;
2057 if (phy_read(gp, MII_BMCR) != 0xffff)
2058 break;
2059 }
2060 if (i == 32) {
2061 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
2062 printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
2063 return -1;
2064 }
2065 gp->phy_type = phy_serdes;
2066 }
2067 }
2068
2069 /* Fetch the FIFO configurations now too. */
2070 gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
2071 gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
2072
2073 if (pdev->vendor == PCI_VENDOR_ID_SUN) {
2074 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
2075 if (gp->tx_fifo_sz != (9 * 1024) ||
2076 gp->rx_fifo_sz != (20 * 1024)) {
2077 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2078 gp->tx_fifo_sz, gp->rx_fifo_sz);
2079 return -1;
2080 }
2081 gp->swrst_base = 0;
2082 } else {
2083 if (gp->tx_fifo_sz != (2 * 1024) ||
2084 gp->rx_fifo_sz != (2 * 1024)) {
2085 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2086 gp->tx_fifo_sz, gp->rx_fifo_sz);
2087 return -1;
2088 }
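			/* Presumably the PCI cache line size (64 bytes
			 * expressed in 32-bit words, i.e. 16) for the CACHE
			 * field that gem_reset() folds into the software
			 * reset register on RIO-based chips.
			 */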
2089 gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
2090 }
2091 }
2092
2093 return 0;
2094}
2095
2096/* Must be invoked under gp->lock and gp->tx_lock. */
2097static void gem_reinit_chip(struct gem *gp)
2098{
2099 /* Reset the chip */
2100 gem_reset(gp);
2101
2102 /* Make sure ints are disabled */
2103 gem_disable_ints(gp);
2104
2105 /* Allocate & setup ring buffers */
2106 gem_init_rings(gp);
2107
2108 /* Configure pause thresholds */
2109 gem_init_pause_thresholds(gp);
2110
2111 /* Init DMA & MAC engines */
2112 gem_init_dma(gp);
2113 gem_init_mac(gp);
2114}
2115
2116
2117/* Must be invoked with no lock held. */
2118static void gem_stop_phy(struct gem *gp, int wol)
2119{
2120 u32 mifcfg;
2121 unsigned long flags;
2122
 2123 /* Let the chip settle down a bit; that seems to help
 2124 * sleep mode on some models
 2125 */
2126 msleep(10);
2127
 2128 /* Make sure we aren't polling for PHY status changes
 2129 * (we don't currently use that feature anyway)
 2130 */
2131 mifcfg = readl(gp->regs + MIF_CFG);
2132 mifcfg &= ~MIF_CFG_POLL;
2133 writel(mifcfg, gp->regs + MIF_CFG);
2134
2135 if (wol && gp->has_wol) {
2136 unsigned char *e = &gp->dev->dev_addr[0];
2137 u32 csr;
2138
2139 /* Setup wake-on-lan for MAGIC packet */
2140 writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
2141 gp->regs + MAC_RXCFG);
2142 writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
2143 writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
2144 writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
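		/* The match registers just written hold the station address
		 * in the same 16-bit packing as MAC_ADDR0-2; the wake logic
		 * compares the repeated address in a magic packet against
		 * them.
		 */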
2145
2146 writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
2147 csr = WOL_WAKECSR_ENABLE;
2148 if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
2149 csr |= WOL_WAKECSR_MII;
2150 writel(csr, gp->regs + WOL_WAKECSR);
2151 } else {
2152 writel(0, gp->regs + MAC_RXCFG);
2153 (void)readl(gp->regs + MAC_RXCFG);
 2154 /* Machine sleep will die in strange ways if we
 2155 * don't wait a bit here; it looks like the chip
 2156 * takes some time to really shut down
 2157 */
2158 msleep(10);
2159 }
2160
2161 writel(0, gp->regs + MAC_TXCFG);
2162 writel(0, gp->regs + MAC_XIFCFG);
2163 writel(0, gp->regs + TXDMA_CFG);
2164 writel(0, gp->regs + RXDMA_CFG);
2165
2166 if (!wol) {
2167 spin_lock_irqsave(&gp->lock, flags);
2168 spin_lock(&gp->tx_lock);
2169 gem_reset(gp);
2170 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
2171 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
2172 spin_unlock(&gp->tx_lock);
2173 spin_unlock_irqrestore(&gp->lock, flags);
2174
2175 /* No need to take the lock here */
2176
2177 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
2178 gp->phy_mii.def->ops->suspend(&gp->phy_mii);
2179
 2180 /* According to Apple, we must set the MDIO pins to this benign
 2181 * state or we may 1) eat more current, 2) damage some PHYs
 2182 */
2183 writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
2184 writel(0, gp->regs + MIF_BBCLK);
2185 writel(0, gp->regs + MIF_BBDATA);
2186 writel(0, gp->regs + MIF_BBOENAB);
2187 writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
2188 (void) readl(gp->regs + MAC_XIFCFG);
2189 }
2190}
2191
2192
2193static int gem_do_start(struct net_device *dev)
2194{
2195 struct gem *gp = dev->priv;
2196 unsigned long flags;
2197
2198 spin_lock_irqsave(&gp->lock, flags);
2199 spin_lock(&gp->tx_lock);
2200
2201 /* Enable the cell */
2202 gem_get_cell(gp);
2203
2204 /* Init & setup chip hardware */
2205 gem_reinit_chip(gp);
2206
2207 gp->running = 1;
2208
2209 if (gp->lstate == link_up) {
2210 netif_carrier_on(gp->dev);
2211 gem_set_link_modes(gp);
2212 }
2213
2214 netif_wake_queue(gp->dev);
2215
2216 spin_unlock(&gp->tx_lock);
2217 spin_unlock_irqrestore(&gp->lock, flags);
2218
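	/* Note: SA_SHIRQ (the old spelling of IRQF_SHARED) is required
	 * since the GEM may sit behind a shared PCI interrupt line.
	 */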
2219 if (request_irq(gp->pdev->irq, gem_interrupt,
2220 SA_SHIRQ, dev->name, (void *)dev)) {
2221 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
2222
2223 spin_lock_irqsave(&gp->lock, flags);
2224 spin_lock(&gp->tx_lock);
2225
2226 gp->running = 0;
2227 gem_reset(gp);
2228 gem_clean_rings(gp);
2229 gem_put_cell(gp);
2230
2231 spin_unlock(&gp->tx_lock);
2232 spin_unlock_irqrestore(&gp->lock, flags);
2233
2234 return -EAGAIN;
2235 }
2236
2237 return 0;
2238}
2239
2240static void gem_do_stop(struct net_device *dev, int wol)
2241{
2242 struct gem *gp = dev->priv;
2243 unsigned long flags;
2244
2245 spin_lock_irqsave(&gp->lock, flags);
2246 spin_lock(&gp->tx_lock);
2247
2248 gp->running = 0;
2249
2250 /* Stop netif queue */
2251 netif_stop_queue(dev);
2252
2253 /* Make sure ints are disabled */
2254 gem_disable_ints(gp);
2255
2256 /* We can drop the lock now */
2257 spin_unlock(&gp->tx_lock);
2258 spin_unlock_irqrestore(&gp->lock, flags);
2259
 2260 /* Stop DMA; the full chip reset below is skipped if we sleep with WOL */
2261 gem_stop_dma(gp);
2262 msleep(10);
2263 if (!wol)
2264 gem_reset(gp);
2265 msleep(10);
2266
2267 /* Get rid of rings */
2268 gem_clean_rings(gp);
2269
2270 /* No irq needed anymore */
2271 free_irq(gp->pdev->irq, (void *) dev);
2272
 2273 /* The cell isn't needed either if WOL is off */
2274 if (!wol) {
2275 spin_lock_irqsave(&gp->lock, flags);
2276 gem_put_cell(gp);
2277 spin_unlock_irqrestore(&gp->lock, flags);
2278 }
2279}
2280
2281static void gem_reset_task(void *data)
2282{
2283 struct gem *gp = (struct gem *) data;
2284
2285 down(&gp->pm_sem);
2286
2287 netif_poll_disable(gp->dev);
2288
2289 spin_lock_irq(&gp->lock);
2290 spin_lock(&gp->tx_lock);
2291
2292 if (gp->running == 0)
2293 goto not_running;
2294
 2295 netif_stop_queue(gp->dev);
 2296
 2297 /* Reset the chip & rings; gp->running was already
 2298 * checked to be non-zero above.
 2299 */
 2300 gem_reinit_chip(gp);
 2301 if (gp->lstate == link_up)
 2302 gem_set_link_modes(gp);
 2303 netif_wake_queue(gp->dev);
2304 not_running:
2305 gp->reset_task_pending = 0;
2306
2307 spin_unlock(&gp->tx_lock);
2308 spin_unlock_irq(&gp->lock);
2309
2310 netif_poll_enable(gp->dev);
2311
2312 up(&gp->pm_sem);
2313}
2314
2315
2316static int gem_open(struct net_device *dev)
2317{
2318 struct gem *gp = dev->priv;
2319 int rc = 0;
2320
2321 down(&gp->pm_sem);
2322
2323 /* We need the cell enabled */
2324 if (!gp->asleep)
2325 rc = gem_do_start(dev);
2326 gp->opened = (rc == 0);
2327
2328 up(&gp->pm_sem);
2329
2330 return rc;
2331}
2332
2333static int gem_close(struct net_device *dev)
2334{
2335 struct gem *gp = dev->priv;
2336
2337 /* Note: we don't need to call netif_poll_disable() here because
2338 * our caller (dev_close) already did it for us
2339 */
2340
2341 down(&gp->pm_sem);
2342
2343 gp->opened = 0;
2344 if (!gp->asleep)
2345 gem_do_stop(dev, 0);
2346
2347 up(&gp->pm_sem);
2348
2349 return 0;
2350}
2351
2352#ifdef CONFIG_PM
2353static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2354{
2355 struct net_device *dev = pci_get_drvdata(pdev);
2356 struct gem *gp = dev->priv;
2357 unsigned long flags;
2358
2359 down(&gp->pm_sem);
2360
2361 netif_poll_disable(dev);
2362
2363 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
2364 dev->name,
2365 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2366
2367 /* Keep the cell enabled during the entire operation */
2368 spin_lock_irqsave(&gp->lock, flags);
2369 spin_lock(&gp->tx_lock);
2370 gem_get_cell(gp);
2371 spin_unlock(&gp->tx_lock);
2372 spin_unlock_irqrestore(&gp->lock, flags);
2373
2374 /* If the driver is opened, we stop the MAC */
2375 if (gp->opened) {
2376 /* Stop traffic, mark us closed */
2377 netif_device_detach(dev);
2378
2379 /* Switch off MAC, remember WOL setting */
2380 gp->asleep_wol = gp->wake_on_lan;
2381 gem_do_stop(dev, gp->asleep_wol);
2382 } else
2383 gp->asleep_wol = 0;
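	/* Either way, asleep_wol must reflect what gem_do_stop() was
	 * told: gem_resume() consults it to rebalance the cell
	 * reference count.
	 */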
2384
2385 /* Mark us asleep */
2386 gp->asleep = 1;
2387 wmb();
2388
2389 /* Stop the link timer */
2390 del_timer_sync(&gp->link_timer);
2391
 2392 /* Now we release the semaphore so as not to block the reset
 2393 * task, which may take it too. We are marked asleep, so there
 2394 * will be no conflict here
 2395 */
2396 up(&gp->pm_sem);
2397
2398 /* Wait for a pending reset task to complete */
2399 while (gp->reset_task_pending)
2400 yield();
2401 flush_scheduled_work();
2402
2403 /* Shut the PHY down eventually and setup WOL */
2404 gem_stop_phy(gp, gp->asleep_wol);
2405
2406 /* Make sure bus master is disabled */
2407 pci_disable_device(gp->pdev);
2408
 2409 /* Release the cell; no need to take a lock at this point since
 2410 * nothing else can happen now
 2411 */
2412 gem_put_cell(gp);
2413
2414 return 0;
2415}
2416
2417static int gem_resume(struct pci_dev *pdev)
2418{
2419 struct net_device *dev = pci_get_drvdata(pdev);
2420 struct gem *gp = dev->priv;
2421 unsigned long flags;
2422
2423 printk(KERN_INFO "%s: resuming\n", dev->name);
2424
2425 down(&gp->pm_sem);
2426
 2427 /* Keep the cell enabled during the entire operation; no need to
 2428 * take a lock here though, since nothing else can happen while
 2429 * we are marked asleep
 2430 */
2431 gem_get_cell(gp);
2432
2433 /* Make sure PCI access and bus master are enabled */
2434 if (pci_enable_device(gp->pdev)) {
2435 printk(KERN_ERR "%s: Can't re-enable chip !\n",
2436 dev->name);
 2437 /* Put the cell and forget it for now; the chip is considered
 2438 * still asleep, and a new sleep cycle may bring it back
 2439 */
2440 gem_put_cell(gp);
2441 up(&gp->pm_sem);
2442 return 0;
2443 }
2444 pci_set_master(gp->pdev);
2445
2446 /* Reset everything */
2447 gem_reset(gp);
2448
2449 /* Mark us woken up */
2450 gp->asleep = 0;
2451 wmb();
2452
 2453 /* Bring the PHY back. Again, the lock is useless at this point
 2454 * as nothing can happen until we restart the whole thing
 2455 */
2456 gem_init_phy(gp);
2457
2458 /* If we were opened, bring everything back */
2459 if (gp->opened) {
2460 /* Restart MAC */
2461 gem_do_start(dev);
2462
2463 /* Re-attach net device */
2464 netif_device_attach(dev);
2465
2466 }
2467
2468 spin_lock_irqsave(&gp->lock, flags);
2469 spin_lock(&gp->tx_lock);
2470
 2471 /* If we had WOL enabled, the cell clock was never turned off during
 2472 * sleep, so we end up being unbalanced. Fix that here
 2473 */
2474 if (gp->asleep_wol)
2475 gem_put_cell(gp);
2476
 2477 /* This function doesn't need to keep holding the cell; if the
 2478 * driver is open, gem_do_start() has taken its own reference.
 2479 */
2480 gem_put_cell(gp);
2481
2482 spin_unlock(&gp->tx_lock);
2483 spin_unlock_irqrestore(&gp->lock, flags);
2484
2485 netif_poll_enable(dev);
2486
2487 up(&gp->pm_sem);
2488
2489 return 0;
2490}
2491#endif /* CONFIG_PM */
2492
2493static struct net_device_stats *gem_get_stats(struct net_device *dev)
2494{
2495 struct gem *gp = dev->priv;
2496 struct net_device_stats *stats = &gp->net_stats;
2497
2498 spin_lock_irq(&gp->lock);
2499 spin_lock(&gp->tx_lock);
2500
2501 /* I have seen this being called while the PM was in progress,
2502 * so we shield against this
2503 */
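	/* The MAC keeps these error counts in narrow hardware
	 * registers; accumulate them into net_stats and write zero
	 * back so they don't saturate between calls.
	 */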
2504 if (gp->running) {
2505 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2506 writel(0, gp->regs + MAC_FCSERR);
2507
2508 stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
2509 writel(0, gp->regs + MAC_AERR);
2510
2511 stats->rx_length_errors += readl(gp->regs + MAC_LERR);
2512 writel(0, gp->regs + MAC_LERR);
2513
2514 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2515 stats->collisions +=
2516 (readl(gp->regs + MAC_ECOLL) +
2517 readl(gp->regs + MAC_LCOLL));
2518 writel(0, gp->regs + MAC_ECOLL);
2519 writel(0, gp->regs + MAC_LCOLL);
2520 }
2521
2522 spin_unlock(&gp->tx_lock);
2523 spin_unlock_irq(&gp->lock);
2524
2525 return &gp->net_stats;
2526}
2527
2528static void gem_set_multicast(struct net_device *dev)
2529{
2530 struct gem *gp = dev->priv;
2531 u32 rxcfg, rxcfg_new;
2532 int limit = 10000;
2533
2534
2535 spin_lock_irq(&gp->lock);
2536 spin_lock(&gp->tx_lock);
2537
2538 if (!gp->running)
2539 goto bail;
2540
2541 netif_stop_queue(dev);
2542
2543 rxcfg = readl(gp->regs + MAC_RXCFG);
2544 rxcfg_new = gem_setup_multicast(gp);
2545#ifdef STRIP_FCS
2546 rxcfg_new |= MAC_RXCFG_SFCS;
2547#endif
2548 gp->mac_rx_cfg = rxcfg_new;
2549
2550 writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
2551 while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2552 if (!limit--)
2553 break;
2554 udelay(10);
2555 }
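	/* Disabling RX is asynchronous: ENAB reads back set until the
	 * MAC has actually stopped, so the loop above bounds the wait
	 * to 10000 * 10us = 100ms before the filter is rewritten.
	 */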
2556
2557 rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
2558 rxcfg |= rxcfg_new;
2559
2560 writel(rxcfg, gp->regs + MAC_RXCFG);
2561
2562 netif_wake_queue(dev);
2563
2564 bail:
2565 spin_unlock(&gp->tx_lock);
2566 spin_unlock_irq(&gp->lock);
2567}
2568
2569/* Jumbo-grams don't seem to work :-( */
2570#define GEM_MIN_MTU 68
2571#if 1
2572#define GEM_MAX_MTU 1500
2573#else
2574#define GEM_MAX_MTU 9000
2575#endif
2576
2577static int gem_change_mtu(struct net_device *dev, int new_mtu)
2578{
2579 struct gem *gp = dev->priv;
2580
2581 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
2582 return -EINVAL;
2583
2584 if (!netif_running(dev) || !netif_device_present(dev)) {
2585 /* We'll just catch it later when the
2586 * device is up'd or resumed.
2587 */
2588 dev->mtu = new_mtu;
2589 return 0;
2590 }
2591
2592 down(&gp->pm_sem);
2593 spin_lock_irq(&gp->lock);
2594 spin_lock(&gp->tx_lock);
2595 dev->mtu = new_mtu;
2596 if (gp->running) {
2597 gem_reinit_chip(gp);
2598 if (gp->lstate == link_up)
2599 gem_set_link_modes(gp);
2600 }
2601 spin_unlock(&gp->tx_lock);
2602 spin_unlock_irq(&gp->lock);
2603 up(&gp->pm_sem);
2604
2605 return 0;
2606}
2607
2608static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2609{
2610 struct gem *gp = dev->priv;
2611
2612 strcpy(info->driver, DRV_NAME);
2613 strcpy(info->version, DRV_VERSION);
2614 strcpy(info->bus_info, pci_name(gp->pdev));
2615}
2616
2617static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2618{
2619 struct gem *gp = dev->priv;
2620
2621 if (gp->phy_type == phy_mii_mdio0 ||
2622 gp->phy_type == phy_mii_mdio1) {
2623 if (gp->phy_mii.def)
2624 cmd->supported = gp->phy_mii.def->features;
2625 else
2626 cmd->supported = (SUPPORTED_10baseT_Half |
2627 SUPPORTED_10baseT_Full);
2628
2629 /* XXX hardcoded stuff for now */
2630 cmd->port = PORT_MII;
2631 cmd->transceiver = XCVR_EXTERNAL;
2632 cmd->phy_address = 0; /* XXX fixed PHYAD */
2633
2634 /* Return current PHY settings */
2635 spin_lock_irq(&gp->lock);
2636 cmd->autoneg = gp->want_autoneg;
2637 cmd->speed = gp->phy_mii.speed;
2638 cmd->duplex = gp->phy_mii.duplex;
2639 cmd->advertising = gp->phy_mii.advertising;
2640
 2641 /* If we started with a forced mode, we don't have a default
 2642 * advertise set; return something sensible so userland can
 2643 * re-enable autoneg properly.
 2644 */
2645 if (cmd->advertising == 0)
2646 cmd->advertising = cmd->supported;
2647 spin_unlock_irq(&gp->lock);
2648 } else { // XXX PCS ?
2649 cmd->supported =
2650 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2651 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2652 SUPPORTED_Autoneg);
2653 cmd->advertising = cmd->supported;
2654 cmd->speed = 0;
2655 cmd->duplex = cmd->port = cmd->phy_address =
2656 cmd->transceiver = cmd->autoneg = 0;
2657 }
2658 cmd->maxtxpkt = cmd->maxrxpkt = 0;
2659
2660 return 0;
2661}
2662
2663static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2664{
2665 struct gem *gp = dev->priv;
2666
2667 /* Verify the settings we care about. */
2668 if (cmd->autoneg != AUTONEG_ENABLE &&
2669 cmd->autoneg != AUTONEG_DISABLE)
2670 return -EINVAL;
2671
2672 if (cmd->autoneg == AUTONEG_ENABLE &&
2673 cmd->advertising == 0)
2674 return -EINVAL;
2675
2676 if (cmd->autoneg == AUTONEG_DISABLE &&
2677 ((cmd->speed != SPEED_1000 &&
2678 cmd->speed != SPEED_100 &&
2679 cmd->speed != SPEED_10) ||
2680 (cmd->duplex != DUPLEX_HALF &&
2681 cmd->duplex != DUPLEX_FULL)))
2682 return -EINVAL;
2683
2684 /* Apply settings and restart link process. */
2685 spin_lock_irq(&gp->lock);
2686 gem_get_cell(gp);
2687 gem_begin_auto_negotiation(gp, cmd);
2688 gem_put_cell(gp);
2689 spin_unlock_irq(&gp->lock);
2690
2691 return 0;
2692}
2693
2694static int gem_nway_reset(struct net_device *dev)
2695{
2696 struct gem *gp = dev->priv;
2697
2698 if (!gp->want_autoneg)
2699 return -EINVAL;
2700
2701 /* Restart link process. */
2702 spin_lock_irq(&gp->lock);
2703 gem_get_cell(gp);
2704 gem_begin_auto_negotiation(gp, NULL);
2705 gem_put_cell(gp);
2706 spin_unlock_irq(&gp->lock);
2707
2708 return 0;
2709}
2710
2711static u32 gem_get_msglevel(struct net_device *dev)
2712{
2713 struct gem *gp = dev->priv;
2714 return gp->msg_enable;
2715}
2716
2717static void gem_set_msglevel(struct net_device *dev, u32 value)
2718{
2719 struct gem *gp = dev->priv;
2720 gp->msg_enable = value;
2721}
2722
2723
2724/* Add more when I understand how to program the chip */
2725/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
2726
2727#define WOL_SUPPORTED_MASK (WAKE_MAGIC)
2728
2729static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2730{
2731 struct gem *gp = dev->priv;
2732
2733 /* Add more when I understand how to program the chip */
2734 if (gp->has_wol) {
2735 wol->supported = WOL_SUPPORTED_MASK;
2736 wol->wolopts = gp->wake_on_lan;
2737 } else {
2738 wol->supported = 0;
2739 wol->wolopts = 0;
2740 }
2741}
2742
2743static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2744{
2745 struct gem *gp = dev->priv;
2746
2747 if (!gp->has_wol)
2748 return -EOPNOTSUPP;
2749 gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
2750 return 0;
2751}
2752
2753static struct ethtool_ops gem_ethtool_ops = {
2754 .get_drvinfo = gem_get_drvinfo,
2755 .get_link = ethtool_op_get_link,
2756 .get_settings = gem_get_settings,
2757 .set_settings = gem_set_settings,
2758 .nway_reset = gem_nway_reset,
2759 .get_msglevel = gem_get_msglevel,
2760 .set_msglevel = gem_set_msglevel,
2761 .get_wol = gem_get_wol,
2762 .set_wol = gem_set_wol,
2763};
2764
2765static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2766{
2767 struct gem *gp = dev->priv;
2768 struct mii_ioctl_data *data = if_mii(ifr);
2769 int rc = -EOPNOTSUPP;
2770 unsigned long flags;
2771
 2772 /* Hold the PM semaphore while doing ioctls or we may collide
 2773 * with power management.
 2774 */
2775 down(&gp->pm_sem);
2776
2777 spin_lock_irqsave(&gp->lock, flags);
2778 gem_get_cell(gp);
2779 spin_unlock_irqrestore(&gp->lock, flags);
2780
2781 switch (cmd) {
2782 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2783 data->phy_id = gp->mii_phy_addr;
2784 /* Fallthrough... */
2785
2786 case SIOCGMIIREG: /* Read MII PHY register. */
2787 if (!gp->running)
2788 rc = -EAGAIN;
2789 else {
2790 data->val_out = __phy_read(gp, data->phy_id & 0x1f,
2791 data->reg_num & 0x1f);
2792 rc = 0;
2793 }
2794 break;
2795
2796 case SIOCSMIIREG: /* Write MII PHY register. */
2797 if (!capable(CAP_NET_ADMIN))
2798 rc = -EPERM;
2799 else if (!gp->running)
2800 rc = -EAGAIN;
2801 else {
2802 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2803 data->val_in);
2804 rc = 0;
2805 }
2806 break;
 2807 }
2808
2809 spin_lock_irqsave(&gp->lock, flags);
2810 gem_put_cell(gp);
2811 spin_unlock_irqrestore(&gp->lock, flags);
2812
2813 up(&gp->pm_sem);
2814
2815 return rc;
2816}
2817
2818#if (!defined(__sparc__) && !defined(CONFIG_PPC_PMAC))
2819/* Fetch MAC address from vital product data of PCI ROM. */
2820static void find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
2821{
2822 int this_offset;
2823
2824 for (this_offset = 0x20; this_offset < len; this_offset++) {
2825 void __iomem *p = rom_base + this_offset;
2826 int i;
2827
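		/* Match what looks like a PCI VPD "NA" (network address)
		 * keyword entry of length 6; the MAC address bytes follow
		 * immediately after this 6-byte header.
		 */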
2828 if (readb(p + 0) != 0x90 ||
2829 readb(p + 1) != 0x00 ||
2830 readb(p + 2) != 0x09 ||
2831 readb(p + 3) != 0x4e ||
2832 readb(p + 4) != 0x41 ||
2833 readb(p + 5) != 0x06)
2834 continue;
2835
2836 this_offset += 6;
2837 p += 6;
2838
2839 for (i = 0; i < 6; i++)
2840 dev_addr[i] = readb(p + i);
2841 break;
2842 }
2843}
2844
2845static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2846{
2847 u32 rom_reg_orig;
2848 void __iomem *p;
2849
2850 if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
2851 if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
2852 goto use_random;
2853 }
2854
2855 pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig);
2856 pci_write_config_dword(pdev, pdev->rom_base_reg,
2857 rom_reg_orig | PCI_ROM_ADDRESS_ENABLE);
2858
2859 p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024));
2860 if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa)
2861 find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
2862
2863 if (p != NULL)
2864 iounmap(p);
2865
2866 pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig);
2867 return;
2868
2869use_random:
2870 /* Sun MAC prefix then 3 random bytes. */
2871 dev_addr[0] = 0x08;
2872 dev_addr[1] = 0x00;
2873 dev_addr[2] = 0x20;
2874 get_random_bytes(dev_addr + 3, 3);
2875 return;
2876}
2877#endif /* not Sparc and not PPC */
2878
2879static int __devinit gem_get_device_address(struct gem *gp)
2880{
2881#if defined(__sparc__) || defined(CONFIG_PPC_PMAC)
2882 struct net_device *dev = gp->dev;
2883#endif
2884
2885#if defined(__sparc__)
2886 struct pci_dev *pdev = gp->pdev;
2887 struct pcidev_cookie *pcp = pdev->sysdata;
2888 int node = -1;
2889
2890 if (pcp != NULL) {
2891 node = pcp->prom_node;
2892 if (prom_getproplen(node, "local-mac-address") == 6)
2893 prom_getproperty(node, "local-mac-address",
2894 dev->dev_addr, 6);
2895 else
2896 node = -1;
2897 }
2898 if (node == -1)
2899 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2900#elif defined(CONFIG_PPC_PMAC)
2901 unsigned char *addr;
2902
2903 addr = get_property(gp->of_node, "local-mac-address", NULL);
2904 if (addr == NULL) {
2905 printk("\n");
2906 printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
2907 return -1;
2908 }
2909 memcpy(dev->dev_addr, addr, 6);
2910#else
2911 get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
2912#endif
2913 return 0;
2914}
2915
2916static void __devexit gem_remove_one(struct pci_dev *pdev)
2917{
2918 struct net_device *dev = pci_get_drvdata(pdev);
2919
2920 if (dev) {
2921 struct gem *gp = dev->priv;
2922
2923 unregister_netdev(dev);
2924
2925 /* Stop the link timer */
2926 del_timer_sync(&gp->link_timer);
2927
2928 /* We shouldn't need any locking here */
2929 gem_get_cell(gp);
2930
2931 /* Wait for a pending reset task to complete */
2932 while (gp->reset_task_pending)
2933 yield();
2934 flush_scheduled_work();
2935
2936 /* Shut the PHY down */
2937 gem_stop_phy(gp, 0);
2938
2939 gem_put_cell(gp);
2940
2941 /* Make sure bus master is disabled */
2942 pci_disable_device(gp->pdev);
2943
2944 /* Free resources */
2945 pci_free_consistent(pdev,
2946 sizeof(struct gem_init_block),
2947 gp->init_block,
2948 gp->gblock_dvma);
2949 iounmap(gp->regs);
2950 pci_release_regions(pdev);
2951 free_netdev(dev);
2952
2953 pci_set_drvdata(pdev, NULL);
2954 }
2955}
2956
2957static int __devinit gem_init_one(struct pci_dev *pdev,
2958 const struct pci_device_id *ent)
2959{
2960 static int gem_version_printed = 0;
2961 unsigned long gemreg_base, gemreg_len;
2962 struct net_device *dev;
2963 struct gem *gp;
2964 int i, err, pci_using_dac;
2965
2966 if (gem_version_printed++ == 0)
2967 printk(KERN_INFO "%s", version);
2968
 2969 /* Apple gmac note: during probe, the chip is powered up by
 2970 * the arch code to allow the code below to work (and to let
 2971 * the chip be probed via the config space). It won't stay
 2972 * powered up until the interface is brought up however, so we
 2973 * can't rely on register configuration done at this point.
 2974 */
2975 err = pci_enable_device(pdev);
2976 if (err) {
2977 printk(KERN_ERR PFX "Cannot enable MMIO operation, "
2978 "aborting.\n");
2979 return err;
2980 }
2981 pci_set_master(pdev);
2982
2983 /* Configure DMA attributes. */
2984
2985 /* All of the GEM documentation states that 64-bit DMA addressing
2986 * is fully supported and should work just fine. However the
2987 * front end for RIO based GEMs is different and only supports
2988 * 32-bit addressing.
2989 *
2990 * For now we assume the various PPC GEMs are 32-bit only as well.
2991 */
2992 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2993 pdev->device == PCI_DEVICE_ID_SUN_GEM &&
 2994 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2995 pci_using_dac = 1;
2996 } else {
 2997 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2998 if (err) {
2999 printk(KERN_ERR PFX "No usable DMA configuration, "
3000 "aborting.\n");
3001 goto err_disable_device;
3002 }
3003 pci_using_dac = 0;
3004 }
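	/* pci_using_dac is remembered so NETIF_F_HIGHDMA can be set at
	 * registration time: with a 64-bit DMA mask the chip can be
	 * handed high-memory buffers without bounce copies.
	 */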
3005
3006 gemreg_base = pci_resource_start(pdev, 0);
3007 gemreg_len = pci_resource_len(pdev, 0);
3008
3009 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3010 printk(KERN_ERR PFX "Cannot find proper PCI device "
3011 "base address, aborting.\n");
3012 err = -ENODEV;
3013 goto err_disable_device;
3014 }
3015
3016 dev = alloc_etherdev(sizeof(*gp));
3017 if (!dev) {
3018 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
3019 err = -ENOMEM;
3020 goto err_disable_device;
3021 }
3022 SET_MODULE_OWNER(dev);
3023 SET_NETDEV_DEV(dev, &pdev->dev);
3024
3025 gp = dev->priv;
3026
3027 err = pci_request_regions(pdev, DRV_NAME);
3028 if (err) {
3029 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
3030 "aborting.\n");
3031 goto err_out_free_netdev;
3032 }
3033
3034 gp->pdev = pdev;
3035 dev->base_addr = (long) pdev;
3036 gp->dev = dev;
3037
3038 gp->msg_enable = DEFAULT_MSG;
3039
3040 spin_lock_init(&gp->lock);
3041 spin_lock_init(&gp->tx_lock);
3042 init_MUTEX(&gp->pm_sem);
3043
3044 init_timer(&gp->link_timer);
3045 gp->link_timer.function = gem_link_timer;
3046 gp->link_timer.data = (unsigned long) gp;
3047
3048 INIT_WORK(&gp->reset_task, gem_reset_task, gp);
3049
3050 gp->lstate = link_down;
3051 gp->timer_ticks = 0;
3052 netif_carrier_off(dev);
3053
3054 gp->regs = ioremap(gemreg_base, gemreg_len);
3055 if (gp->regs == 0UL) {
3056 printk(KERN_ERR PFX "Cannot map device registers, "
3057 "aborting.\n");
3058 err = -EIO;
3059 goto err_out_free_res;
3060 }
3061
3062 /* On Apple, we want a reference to the Open Firmware device-tree
3063 * node. We use it for clock control.
3064 */
3065#ifdef CONFIG_PPC_PMAC
3066 gp->of_node = pci_device_to_OF_node(pdev);
3067#endif
3068
3069 /* Only Apple version supports WOL afaik */
3070 if (pdev->vendor == PCI_VENDOR_ID_APPLE)
3071 gp->has_wol = 1;
3072
3073 /* Make sure cell is enabled */
3074 gem_get_cell(gp);
3075
3076 /* Make sure everything is stopped and in init state */
3077 gem_reset(gp);
3078
3079 /* Fill up the mii_phy structure (even if we won't use it) */
3080 gp->phy_mii.dev = dev;
3081 gp->phy_mii.mdio_read = _phy_read;
3082 gp->phy_mii.mdio_write = _phy_write;
3083#ifdef CONFIG_PPC_PMAC
3084 gp->phy_mii.platform_data = gp->of_node;
3085#endif
3086 /* By default, we start with autoneg */
3087 gp->want_autoneg = 1;
3088
3089 /* Check fifo sizes, PHY type, etc... */
3090 if (gem_check_invariants(gp)) {
3091 err = -ENODEV;
3092 goto err_out_iounmap;
3093 }
3094
3095 /* It is guaranteed that the returned buffer will be at least
3096 * PAGE_SIZE aligned.
3097 */
3098 gp->init_block = (struct gem_init_block *)
3099 pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3100 &gp->gblock_dvma);
3101 if (!gp->init_block) {
3102 printk(KERN_ERR PFX "Cannot allocate init block, "
3103 "aborting.\n");
3104 err = -ENOMEM;
3105 goto err_out_iounmap;
3106 }
3107
3108 if (gem_get_device_address(gp))
3109 goto err_out_free_consistent;
3110
3111 dev->open = gem_open;
3112 dev->stop = gem_close;
3113 dev->hard_start_xmit = gem_start_xmit;
3114 dev->get_stats = gem_get_stats;
3115 dev->set_multicast_list = gem_set_multicast;
3116 dev->do_ioctl = gem_ioctl;
3117 dev->poll = gem_poll;
3118 dev->weight = 64;
3119 dev->ethtool_ops = &gem_ethtool_ops;
3120 dev->tx_timeout = gem_tx_timeout;
3121 dev->watchdog_timeo = 5 * HZ;
3122 dev->change_mtu = gem_change_mtu;
3123 dev->irq = pdev->irq;
3124 dev->dma = 0;
3125#ifdef CONFIG_NET_POLL_CONTROLLER
3126 dev->poll_controller = gem_poll_controller;
3127#endif
3128
 3129 /* Set that now, in case PM kicks in right away */
3130 pci_set_drvdata(pdev, dev);
3131
3132 /* Detect & init PHY, start autoneg, we release the cell now
3133 * too, it will be managed by whoever needs it
3134 */
3135 gem_init_phy(gp);
3136
3137 spin_lock_irq(&gp->lock);
3138 gem_put_cell(gp);
3139 spin_unlock_irq(&gp->lock);
3140
3141 /* Register with kernel */
3142 if (register_netdev(dev)) {
3143 printk(KERN_ERR PFX "Cannot register net device, "
3144 "aborting.\n");
3145 err = -ENOMEM;
3146 goto err_out_free_consistent;
3147 }
3148
3149 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
3150 dev->name);
3151 for (i = 0; i < 6; i++)
3152 printk("%2.2x%c", dev->dev_addr[i],
3153 i == 5 ? ' ' : ':');
3154 printk("\n");
3155
3156 if (gp->phy_type == phy_mii_mdio0 ||
3157 gp->phy_type == phy_mii_mdio1)
3158 printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
3159 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3160
3161 /* GEM can do it all... */
3162 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
3163 if (pci_using_dac)
3164 dev->features |= NETIF_F_HIGHDMA;
3165
3166 return 0;
3167
3168err_out_free_consistent:
3169 gem_remove_one(pdev);
3170err_out_iounmap:
3171 gem_put_cell(gp);
3172 iounmap(gp->regs);
3173
3174err_out_free_res:
3175 pci_release_regions(pdev);
3176
3177err_out_free_netdev:
3178 free_netdev(dev);
3179err_disable_device:
3180 pci_disable_device(pdev);
3181 return err;
3182
3183}
3184
3185
3186static struct pci_driver gem_driver = {
3187 .name = GEM_MODULE_NAME,
3188 .id_table = gem_pci_tbl,
3189 .probe = gem_init_one,
3190 .remove = __devexit_p(gem_remove_one),
3191#ifdef CONFIG_PM
3192 .suspend = gem_suspend,
3193 .resume = gem_resume,
3194#endif /* CONFIG_PM */
3195};
3196
3197static int __init gem_init(void)
3198{
3199 return pci_module_init(&gem_driver);
3200}
3201
3202static void __exit gem_cleanup(void)
3203{
3204 pci_unregister_driver(&gem_driver);
3205}
3206
3207module_init(gem_init);
3208module_exit(gem_cleanup);