b44: timer power saving

drivers/net/b44.c
/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.01"
#define DRV_MODULE_RELDATE	"Jun 16, 2006"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
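
/* Note: tx_pending caps how much of the 512-entry ring is actually
 * used, so TX_BUFFS_AVAIL() always keeps TX_RING_GAP() descriptors in
 * reserve; NEXT_TX() relies on the ring size being a power of two.
 */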

#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)
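/* Buffer sizing: 1536 covers a maximum-size frame, bp->rx_offset is
 * headroom for the rx_header the chip writes in front of each packet
 * (see b44_alloc_rx_skb below), plus 64 bytes of slack; the "+ 8" in
 * TX_PKT_BUF_SZ matches the "possible VLAN tag" allowance used for
 * B44_TXMAXLEN in b44_init_hw.
 */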

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

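/* When a descriptor ring lives in kmalloc'ed memory mapped with
 * dma_map_single() (the RING_HACK fallback in b44_alloc_consistent)
 * rather than in coherent DMA memory, individual descriptors must be
 * synced by hand; the offset is masked down to a dma_desc_align_mask
 * boundary and dma_desc_sync_size bytes are synced.
 */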
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

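/* Temporarily slide the PCI BAR0 window over the PCI core's register
 * space so the requested cores' interrupts can be routed and PCI
 * prefetch/burst enabled, then restore the window and return the core
 * revision read while the window was moved.
 */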
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;

	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;

	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	};
#endif
	return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}

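/* Load one 6-byte MAC address into the given CAM slot: bytes 2-5 go
 * into CAM_DATA_LO, bytes 0-1 plus the valid bit into CAM_DATA_HI,
 * then a write command is issued and polled for completion.
 */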
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

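	/* This is the "timer power saving" change: round_jiffies()
	 * aligns the next expiry to a whole second so periodic timers
	 * across the system tend to fire together, letting an
	 * otherwise idle CPU sleep longer between wakeups.
	 */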
	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
		/* Sigh... */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	skb->dev = bp->dev;
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map  = &bp->rx_buffers[dest_idx];
	src_desc  = &bp->rx_ring[src_idx];
	src_map   = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dest_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}

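/* Receive loop: walk the ring from rx_cons up to the hardware's
 * current descriptor.  Error-flagged or oversized frames are recycled
 * in place; frames up to RX_COPY_THRESHOLD are copied into a fresh skb
 * so the original buffer can be recycled, while larger ones are handed
 * up whole and replaced with a newly allocated buffer.
 */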
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

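/* Transmit path: map the skb for DMA, falling back to a GFP_DMA bounce
 * buffer whenever the mapping lands above the chip's 1GB DMA limit,
 * then fill in the next descriptor and kick B44_DMATX_PTR (twice on
 * chips with the buggy-TXPTR erratum).
 */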
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
						 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
					  skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_TODEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(tx_ring_dma) ||
		    tx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

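/* Build a Wake-on-LAN "magic packet" match pattern starting at @offset:
 * six 0xff sync bytes followed by repeated copies of the MAC address,
 * as many as fit within B44_PATTERN_SIZE.  Each byte the hardware must
 * match gets its bit set in @pmask.  Returns the index of the last
 * pattern byte, i.e. the pattern length minus one.
 */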
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			len++;
			set_bit(len, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0.  */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

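/* Arm Wake-on-LAN: B0 and later chips can match magic packets in
 * hardware (DEVCTRL_MPM), while older ones fall back to the
 * pattern-match emulation above; in both cases the core's and the PCI
 * function's power-save/PME enable bits are then set.
 */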
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;
	u16 pmval;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}

	val = br32(bp, B44_SBTMSLOW);
	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

1823static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1824{
1825 struct b44 *bp = netdev_priv(dev);
1826
1da177e4
LT
1827 /* We do not support gigabit. */
1828 if (cmd->autoneg == AUTONEG_ENABLE) {
1829 if (cmd->advertising &
1830 (ADVERTISED_1000baseT_Half |
1831 ADVERTISED_1000baseT_Full))
1832 return -EINVAL;
1833 } else if ((cmd->speed != SPEED_100 &&
1834 cmd->speed != SPEED_10) ||
1835 (cmd->duplex != DUPLEX_HALF &&
1836 cmd->duplex != DUPLEX_FULL)) {
1837 return -EINVAL;
1838 }
1839
1840 spin_lock_irq(&bp->lock);
1841
1842 if (cmd->autoneg == AUTONEG_ENABLE) {
47b9c3b1
GZ
1843 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1844 B44_FLAG_100_BASE_T |
1845 B44_FLAG_FULL_DUPLEX |
1846 B44_FLAG_ADV_10HALF |
1da177e4
LT
1847 B44_FLAG_ADV_10FULL |
1848 B44_FLAG_ADV_100HALF |
1849 B44_FLAG_ADV_100FULL);
47b9c3b1
GZ
1850 if (cmd->advertising == 0) {
1851 bp->flags |= (B44_FLAG_ADV_10HALF |
1852 B44_FLAG_ADV_10FULL |
1853 B44_FLAG_ADV_100HALF |
1854 B44_FLAG_ADV_100FULL);
1855 } else {
1856 if (cmd->advertising & ADVERTISED_10baseT_Half)
1857 bp->flags |= B44_FLAG_ADV_10HALF;
1858 if (cmd->advertising & ADVERTISED_10baseT_Full)
1859 bp->flags |= B44_FLAG_ADV_10FULL;
1860 if (cmd->advertising & ADVERTISED_100baseT_Half)
1861 bp->flags |= B44_FLAG_ADV_100HALF;
1862 if (cmd->advertising & ADVERTISED_100baseT_Full)
1863 bp->flags |= B44_FLAG_ADV_100FULL;
1864 }
1da177e4
LT
1865 } else {
1866 bp->flags |= B44_FLAG_FORCE_LINK;
47b9c3b1 1867 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1da177e4
LT
1868 if (cmd->speed == SPEED_100)
1869 bp->flags |= B44_FLAG_100_BASE_T;
1870 if (cmd->duplex == DUPLEX_FULL)
1871 bp->flags |= B44_FLAG_FULL_DUPLEX;
1872 }
1873
47b9c3b1
GZ
1874 if (netif_running(dev))
1875 b44_setup_phy(bp);
1da177e4
LT
1876
1877 spin_unlock_irq(&bp->lock);
1878
1879 return 0;
1880}
1881
1882static void b44_get_ringparam(struct net_device *dev,
1883 struct ethtool_ringparam *ering)
1884{
1885 struct b44 *bp = netdev_priv(dev);
1886
1887 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1888 ering->rx_pending = bp->rx_pending;
1889
1890 /* XXX ethtool lacks a tx_max_pending, oops... */
1891}
1892
1893static int b44_set_ringparam(struct net_device *dev,
1894 struct ethtool_ringparam *ering)
1895{
1896 struct b44 *bp = netdev_priv(dev);
1897
1898 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1899 (ering->rx_mini_pending != 0) ||
1900 (ering->rx_jumbo_pending != 0) ||
1901 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1902 return -EINVAL;
1903
1904 spin_lock_irq(&bp->lock);
1905
1906 bp->rx_pending = ering->rx_pending;
1907 bp->tx_pending = ering->tx_pending;
1908
1909 b44_halt(bp);
1910 b44_init_rings(bp);
5fc7d61a 1911 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1912 netif_wake_queue(bp->dev);
1913 spin_unlock_irq(&bp->lock);
1914
1915 b44_enable_ints(bp);
10badc21 1916
1da177e4
LT
1917 return 0;
1918}
1919
1920static void b44_get_pauseparam(struct net_device *dev,
1921 struct ethtool_pauseparam *epause)
1922{
1923 struct b44 *bp = netdev_priv(dev);
1924
1925 epause->autoneg =
1926 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1927 epause->rx_pause =
1928 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1929 epause->tx_pause =
1930 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1931}
1932
1933static int b44_set_pauseparam(struct net_device *dev,
1934 struct ethtool_pauseparam *epause)
1935{
1936 struct b44 *bp = netdev_priv(dev);
1937
1938 spin_lock_irq(&bp->lock);
1939 if (epause->autoneg)
1940 bp->flags |= B44_FLAG_PAUSE_AUTO;
1941 else
1942 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1943 if (epause->rx_pause)
1944 bp->flags |= B44_FLAG_RX_PAUSE;
1945 else
1946 bp->flags &= ~B44_FLAG_RX_PAUSE;
1947 if (epause->tx_pause)
1948 bp->flags |= B44_FLAG_TX_PAUSE;
1949 else
1950 bp->flags &= ~B44_FLAG_TX_PAUSE;
1951 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1952 b44_halt(bp);
1953 b44_init_rings(bp);
5fc7d61a 1954 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1955 } else {
1956 __b44_set_flow_ctrl(bp, bp->flags);
1957 }
1958 spin_unlock_irq(&bp->lock);
1959
1960 b44_enable_ints(bp);
10badc21 1961
1da177e4
LT
1962 return 0;
1963}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(b44_gstrings);
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
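
/* The walk above treats bp->hw_stats as a flat array of u32 counters
 * starting at tx_good_octets; it is only correct as long as the field
 * order in the hw_stats structure matches the order of the b44_gstrings
 * entries, one counter per string.
 */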

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}
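
/* Only magic-packet wake is supported. b44_set_wol() merely arms the
 * flag; the wake hardware itself is programmed at suspend time, when
 * b44_suspend() calls b44_setup_wol(). Enable from userspace with
 * "ethtool -s eth0 wol g" (interface name illustrative).
 */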

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_stats_count	= b44_get_stats_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}
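
/* generic_mii_ioctl() services the standard MII requests (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) through the mdio_read/mdio_write callbacks
 * installed on bp->mii_if in b44_init_one() below.
 */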

/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	__le16 *ptr = (__le16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));

	return 0;
}
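
/* The EEPROM is exposed as a window of 16-bit words at register offset
 * 4096; each word is stored little-endian, so the two bytes within every
 * word land swapped in data[]. That is why b44_get_invariants() below
 * picks the MAC address out of eeprom[] in pairwise-reversed order
 * (79, 78, 81, 80, ...).
 */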

static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */

	if (ssb_get_core_rev(bp) >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

out:
	return err;
}
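
/* A note on the rx_offset of 30 chosen above: assuming the 28-byte
 * rx_header that b44.h defines for this hardware, 28 bytes of header
 * plus 2 bytes of pad put the 14-byte Ethernet header at offset 30,
 * which leaves the IP header 4-byte aligned in the receive buffer.
 */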

static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}
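
	/* Both the streaming and coherent masks are capped at 30 bits
	 * because the 4400 core can only generate DMA addresses within
	 * the first 1 GB (2^30 bytes) of the physical address space.
	 */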

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == NULL) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(bp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}
	pci_disable_device(pdev);
	return 0;
}
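
/* On suspend the periodic PHY timer is stopped first so it cannot touch
 * a halted chip. If wake-on-LAN was armed via b44_set_wol(), a partial
 * reset keeps just enough of the core alive for the magic-packet logic
 * programmed by b44_setup_wol(); otherwise the device is powered down
 * entirely.
 */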

static int b44_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
		       dev->name);
		return rc;
	}

	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		pci_disable_device(pdev);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_register_driver(&b44_driver);
}
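
/* Worked example for the values computed in b44_init(): with a 32-byte
 * cache line, dma_desc_align_size is 32, so dma_desc_align_mask becomes
 * ~31 == 0xffffffe0 and descriptor sync offsets get rounded down to a
 * cache-line boundary, while dma_desc_sync_size covers at least one
 * full struct dma_desc.
 */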

static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);