/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.01"
#define DRV_MODULE_RELDATE	"Jun 16, 2006"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
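
/* A worked example of TX_BUFFS_AVAIL() (values hypothetical): with
 * tx_pending = 511, tx_cons = 10 and tx_prod = 500, cons <= prod and
 * 10 + 511 - 500 = 21 buffers are free.  Once the producer wraps
 * (tx_cons = 500, tx_prod = 10), the other branch gives
 * 500 - 10 - TX_RING_GAP = 489.
 */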

#define RX_PKT_OFFSET		30
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}

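/* The two helpers above back the B44_FLAG_{RX,TX}_RING_HACK paths,
 * where a descriptor ring lives in streaming (dma_map_single) memory
 * rather than coherent memory.  Every descriptor touched by the CPU or
 * the chip must then be synced explicitly; the offset is masked down
 * to a descriptor boundary via dma_desc_align_mask.
 */
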
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}

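/* Note that b44_wait_bit() polls in 10 usec steps, so "timeout" is a
 * poll count rather than a time: e.g. b44_wait_bit(bp, B44_CAM_CTRL,
 * CAM_CTRL_BUSY, 100, 1) waits up to roughly 1 msec for the busy bit
 * to clear.
 */
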
/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 type, base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;

	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;

	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	}
#endif
	return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

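/* A worked example of the CAM word layout (example address): for MAC
 * 00:10:18:aa:bb:cc, __b44_cam_write() builds CAM_DATA_LO from bytes
 * 2..5 (0x18aabbcc) and CAM_DATA_HI from bytes 0..1 (0x0010) plus the
 * valid bit, i.e. the low register holds the least significant four
 * bytes of the address.
 */
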
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

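/* The words written to B44_MDIO_DATA above follow the IEEE 802.3
 * clause 22 MDIO frame: start bits, a read/write opcode, the 5-bit
 * PHY (PMD) address, the 5-bit register address, a turnaround pattern
 * and 16 data bits.  Completion is signalled through EMAC_INT_MII,
 * which both routines poll via b44_wait_bit().
 */
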
/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

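/* Note: the check above mirrors one row of the IEEE 802.3 pause
 * resolution table (cf. mii_resolve_flowctrl_fdx in the mii library):
 * both ends advertise asymmetric pause while the partner cannot
 * receive pause frames itself, so only receive-side pause makes sense
 * locally.  All other combinations intentionally resolve to "off".
 */
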
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
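/* Sketch of the resulting buffer layout (RX_PKT_OFFSET == 30):
 *
 *   skb->data
 *   |<-- rx_header + pad (30 bytes) -->|<-- packet data ... -->|
 *   ^                                  ^
 *   DMA mapping starts here;           descriptor points here
 *   chip writes the rx_header here     (mapping + RX_PKT_OFFSET)
 */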
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
		/* Sigh... */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	rh = (struct rx_header *) skb->data;
	skb_reserve(skb, RX_PKT_OFFSET);

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

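/* The double-allocation dance above is the bounce strategy for the
 * chip's 30-bit (1GB) DMA limit: if the first mapping lands above
 * DMA_30BIT_MASK it is undone and the skb is reallocated with
 * GFP_DMA, whose zone (the low 16MB on x86) sits comfortably below
 * the limit.
 */
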
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dest_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

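/* b44_rx() uses the usual copybreak scheme: frames larger than
 * RX_COPY_THRESHOLD are "flipped" (the full buffer goes up the stack
 * and a fresh one is allocated into the ring), while small frames are
 * copied into a tightly sized skb -- reserving 2 bytes so the IP
 * header lands 4-byte aligned -- and the original buffer is recycled
 * in place, which is cheaper than remapping for short packets.
 */
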
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

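/* Together, b44_interrupt() and b44_poll() implement the pre-2.6.24
 * NAPI contract: the ISR latches the cause bits into bp->istat, masks
 * the chip's interrupts and schedules the poll; the poll routine then
 * drains TX/RX work against the quota and re-enables interrupts
 * (b44_enable_ints) only once it reports itself done.
 */
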
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
						 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

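/* Each TX descriptor built above carries the frame length in
 * DESC_CTRL_LEN plus IOC (interrupt on completion) and SOF/EOF flags
 * marking the frame boundaries -- always both, since the driver does
 * not yet do scatter-gather -- while DESC_CTRL_EOT on the last ring
 * entry tells the chip to wrap.  The wmb() orders the descriptor
 * update ahead of the doorbell write to B44_DMATX_PTR.
 */
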
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_TODEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(tx_ring_dma) ||
		    tx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

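/* The fallback paths above are where the B44_FLAG_{RX,TX}_RING_HACK
 * flags originate: when pci_alloc_consistent() cannot provide a
 * suitably placed ring, a plain kzalloc'd table is streaming-mapped
 * instead, and from then on every descriptor access must go through
 * the b44_sync_dma_desc_for_{cpu,device}() helpers defined earlier.
 */
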
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

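/* How the three reset kinds map onto callers: B44_FULL_RESET (open,
 * tx timeout, MTU/ring/pause reconfiguration) also resets and
 * renegotiates the PHY; B44_FULL_RESET_SKIP_PHY (error recovery in
 * b44_poll) restarts the DMA engines without touching the link; and
 * B44_PARTIAL_RESET (b44_close with WOL armed) brings up only the RX
 * side so the chip can still see wake-up patterns.
 */
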
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			len++;
			set_bit(len, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

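/* b44_magic_pattern() lays down the classic Wake-on-LAN magic packet
 * signature at the given payload offset: six 0xff synchronization
 * bytes followed by repeated copies of the station MAC (up to
 * B44_MAX_PATTERNS of them, as many as fit in the 128-byte pattern
 * buffer), with pmask marking exactly the byte positions the filter
 * hardware must match.
 */
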
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;
	u16 pmval;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}

	val = br32(bp, B44_SBTMSLOW);
	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

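/* Two WOL strategies, selected on chip revision: B0 and later parts
 * do native magic-packet matching (DEVCTRL_MPM) keyed on the station
 * address in B44_ADDR_LO/HI, while older revisions fall back to the
 * pseudo-magic pattern filters built above.  Either way the core and
 * the PCI function are then put into power-save/PME state via
 * SBTMSLOW_PE and SSB_PMCSR.
 */
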
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

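/* CAM slot usage: slot 0 always holds the station address (written by
 * __b44_set_mac_addr), multicast entries fill slots from 1 upward
 * (bounded by B44_MCAST_TABLE_SIZE), and the loop above clears any
 * remaining slots up to 64 with the all-zero address so stale filters
 * cannot match.
 */
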
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

3353930d
FR
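/* ethtool statistics.  The counter values are read out of bp->hw_stats
 * as a contiguous array of u32s starting at tx_good_octets, so the
 * layout there must match the order of the b44_gstrings name table.
 */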
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(b44_gstrings);
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

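/* Wake-on-LAN: the hardware supports magic-packet wake only. */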
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

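/* MII ioctl handler; everything is delegated to generic_mii_ioctl()
 * under the driver lock.
 */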
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

/* Read 128 bytes of EEPROM, one 16-bit word at a time. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	__le16 *ptr = (__le16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));

	return 0;
}

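/* Pull the MAC address, PHY address and core parameters out of the
 * EEPROM image.  Note the byte-swizzled MAC layout: the address is
 * stored as three byte-swapped 16-bit words starting at offset 78.
 */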
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (ssb_get_core_rev(bp) >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

out:
	return err;
}

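/* One-time probe: map the register BAR, wire up the net_device
 * callbacks and register the interface.
 */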
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

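	/* The 4400 is limited to 30-bit DMA addressing, so restrict both
	 * the streaming and consistent masks accordingly.
	 */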
	err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (!bp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

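	/* Wire up the net_device methods and NAPI polling parameters. */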
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

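/* Device removal: tear-down mirrors the probe path in reverse order. */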
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(bp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

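/* Suspend: quiesce the device; if Wake-on-LAN is enabled, leave the
 * chip partially initialized with the wake-up pattern armed.
 */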
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}
	pci_disable_device(pdev);
	return 0;
}

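/* Resume: restore PCI state, reclaim the IRQ and bring the device
 * back up if it was running at suspend time.
 */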
static int b44_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
		       dev->name);
		return rc;
	}

	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		pci_disable_device(pdev);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

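/* Module init: derive DMA descriptor alignment from the cache line
 * size before registering the PCI driver.
 */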
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_register_driver(&b44_driver);
}

static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);