/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.01"
#define DRV_MODULE_RELDATE	"Jun 16, 2006"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

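/* Worked example of the accounting above: with the default tx_pending of
 * 511, TX_RING_GAP is 1.  If tx_prod has wrapped around to 3 while tx_cons
 * is still 5, TX_BUFFS_AVAIL yields 5 - 3 - 1 = 1 free slot.  NEXT_TX
 * relies on B44_TX_RING_SIZE being a power of two, so the AND mask wraps
 * the index for free.
 */
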
#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

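/* A note on RX_PKT_BUF_SZ: 1536 covers a max-size Ethernet frame with some
 * slack, bp->rx_offset leaves headroom for the rx_header the chip writes in
 * front of each packet, and the extra 64 bytes appear to be alignment and
 * overrun slack for the DMA engine (the code itself does not say).
 */
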
/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}

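/* These helpers sync only a cache-alignment-sized window around a single
 * descriptor (dma_desc_sync_size bytes at the masked offset) instead of the
 * whole ring, which keeps the *_RING_HACK streaming-DMA fallback (see
 * b44_alloc_consistent) cheap.  In the upstream driver the mask and size
 * are derived from dma_get_cache_alignment() at module init, which is not
 * part of this excerpt.
 */
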
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}

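/* Example: b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1) busy-waits
 * for up to roughly 1 ms (100 iterations x 10 us) for the CAM BUSY bit to
 * clear; passing clear == 0 instead waits for the bit to become set.
 */
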
/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}

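/* ssb_pci_setup() works by temporarily re-pointing the BAR0 window register
 * at the PCI core of the SiliconBackplane (BCM4400_PCI_CORE_ADDR), poking
 * that core's registers through the usual br32/bw32 MMIO accessors, and
 * then restoring the window so the Ethernet core is visible again.
 */
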
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;

	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;

	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	};
#endif
	return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

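/* CAM entry layout as programmed above: the low data register holds the
 * last four MAC bytes (data[2..5], most significant first) and the high
 * register holds the first two plus a valid bit, so one 6-byte Ethernet
 * address occupies exactly one CAM index.
 */
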
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

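/* The MDIO_DATA writes above assemble a standard clause-22 MII management
 * frame: start bits, a read/write opcode, the 5-bit PHY address from
 * bp->phy_addr, the 5-bit register address, a turnaround field, and 16 data
 * bits.  Completion is signalled by the EMAC_INT_MII status bit rather than
 * by polling a separate busy flag.
 */
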
/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}


static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}


static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}


/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
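/* Buffer layout used below, assuming bp->rx_offset has its usual value of
 * 30 (the rx_header size):
 *
 *   mapping ----------> +-------------------+
 *                       | struct rx_header  |  30 bytes, filled in by chip
 *   skb->data --------> +-------------------+  (after skb_reserve)
 *                       | packet data       |
 *                       +-------------------+
 *
 * The descriptor's addr field is set to mapping + rx_offset (plus the bus
 * offset), i.e. past where the chip will deposit the header.
 */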
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
		/* Sigh... */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (dma_mapping_error(mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	skb->dev = bp->dev;
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map  = &bp->rx_buffers[dest_idx];
	src_desc  = &bp->rx_ring[src_idx];
	src_map   = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

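/* The receive path above implements a classic copybreak scheme: frames
 * larger than RX_COPY_THRESHOLD keep their ring buffer (a fresh skb is
 * allocated to replace it in the ring), while small frames are memcpy'd
 * into a compact skb (reserving 2 bytes so the IP header lands on a 4-byte
 * boundary) and the original buffer is recycled in place.
 */
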
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}

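/* b44_poll() uses the original NAPI interface (pre-2.6.24): the caller
 * passes a *budget that the driver decrements along with netdev->quota,
 * and the return value is 0 when all work is done (after re-enabling
 * interrupts via netif_rx_complete()) or 1 to stay on the poll list.
 */
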
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(mapping))
			pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
			if (!dma_mapping_error(mapping))
				pci_unmap_single(bp->pdev, mapping,
						 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irq(&bp->lock);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

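/* Everything in the transmit path above is constrained by the chip's
 * 30-bit DMA limit: any skb that maps above DMA_30BIT_MASK (the first 1GB
 * of address space) is copied into a GFP_DMA bounce buffer before being
 * handed to the ring.  On machines with less than 1GB of RAM the bounce
 * path should normally never trigger.
 */
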
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_TODEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, GFP_KERNEL);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(tx_ring_dma) ||
		    tx_ring_dma + size > DMA_30BIT_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

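/* Summary of the *_RING_HACK fallback set up above: when
 * pci_alloc_consistent() cannot provide a coherent ring below the 30-bit
 * DMA limit, the ring lives in ordinary kernel memory mapped with
 * dma_map_single(), and every descriptor update must then be paired with
 * the explicit b44_sync_dma_desc_for_device()/..._for_cpu() calls seen in
 * the fast paths.
 */
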
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		goto out;

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		b44_chip_reset(bp);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			len++;
			set_bit(len, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

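/* A magic packet is six 0xff sync bytes followed by sixteen back-to-back
 * copies of the station MAC address.  b44_magic_pattern() lays exactly that
 * out at 'offset' within the 128-byte pattern buffer (as much of it as
 * fits), sets the corresponding bits in the byte mask, and returns the
 * index of the last significant byte, i.e. the length minus one that gets
 * programmed into B44_WKUP_LEN below.
 */
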
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;
	u16 pmval;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}

	val = br32(bp, B44_SBTMSLOW);
	bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

	pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
	pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	netif_poll_disable(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 0;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

3353930d
FR
1966static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1967{
1968 switch(stringset) {
1969 case ETH_SS_STATS:
1970 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1971 break;
1972 }
1973}
1974
1975static int b44_get_stats_count(struct net_device *dev)
1976{
1977 return ARRAY_SIZE(b44_gstrings);
1978}
1979
1980static void b44_get_ethtool_stats(struct net_device *dev,
1981 struct ethtool_stats *stats, u64 *data)
1982{
1983 struct b44 *bp = netdev_priv(dev);
1984 u32 *val = &bp->hw_stats.tx_good_octets;
1985 u32 i;
1986
1987 spin_lock_irq(&bp->lock);
1988
1989 b44_stats_update(bp);
1990
1991 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1992 *data++ = *val++;
1993
1994 spin_unlock_irq(&bp->lock);
1995}
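/* Editor's note: the loop above treats &bp->hw_stats.tx_good_octets as
 * the start of a flat u32 array, so it implicitly requires struct
 * b44_hw_stats (in b44.h) to hold one contiguous u32 per entry of
 * b44_gstrings, in the same order -- roughly:
 *
 *	struct b44_hw_stats {
 *		u32 tx_good_octets;	-- pairs with b44_gstrings[0]
 *		u32 tx_good_pkts;	-- pairs with b44_gstrings[1]
 *		...
 *	};
 *
 * Reordering either the struct or the string table would silently
 * misreport every counter.
 */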
1996
1997static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1998{
1999 struct b44 *bp = netdev_priv(dev);
2000
2001 wol->supported = WAKE_MAGIC;
2002 if (bp->flags & B44_FLAG_WOL_ENABLE)
2003 wol->wolopts = WAKE_MAGIC;
2004 else
2005 wol->wolopts = 0;
2006 memset(&wol->sopass, 0, sizeof(wol->sopass));
2007}
2008
2009static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2010{
2011 struct b44 *bp = netdev_priv(dev);
2012
2013 spin_lock_irq(&bp->lock);
2014 if (wol->wolopts & WAKE_MAGIC)
2015 bp->flags |= B44_FLAG_WOL_ENABLE;
2016 else
2017 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2018 spin_unlock_irq(&bp->lock);
2019
2020 return 0;
2021}
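/* Illustrative usage (editor's sketch; interface name assumed): only
 * WAKE_MAGIC is supported, so the meaningful settings are
 *
 *   ethtool -s eth0 wol g    # wake on magic packet
 *   ethtool -s eth0 wol d    # wake disabled
 *
 * The flag is consumed in b44_suspend(), which calls b44_setup_wol()
 * after a partial reset to arm the wake-up logic before power-down.
 */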
2022
2023static const struct ethtool_ops b44_ethtool_ops = {
2024 .get_drvinfo = b44_get_drvinfo,
2025 .get_settings = b44_get_settings,
2026 .set_settings = b44_set_settings,
2027 .nway_reset = b44_nway_reset,
2028 .get_link = ethtool_op_get_link,
2029 .get_wol = b44_get_wol,
2030 .set_wol = b44_set_wol,
2031 .get_ringparam = b44_get_ringparam,
2032 .set_ringparam = b44_set_ringparam,
2033 .get_pauseparam = b44_get_pauseparam,
2034 .set_pauseparam = b44_set_pauseparam,
2035 .get_msglevel = b44_get_msglevel,
2036 .set_msglevel = b44_set_msglevel,
2037 .get_strings = b44_get_strings,
2038 .get_stats_count = b44_get_stats_count,
2039 .get_ethtool_stats = b44_get_ethtool_stats,
2040	.get_perm_addr		= ethtool_op_get_perm_addr,
2041};
2042
2043static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2044{
2045 struct mii_ioctl_data *data = if_mii(ifr);
2046 struct b44 *bp = netdev_priv(dev);
2047 int err = -EINVAL;
2048
2049 if (!netif_running(dev))
2050 goto out;
2051
2052 spin_lock_irq(&bp->lock);
2053 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2054 spin_unlock_irq(&bp->lock);
2055out:
2056 return err;
2057}
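/* Editor's note: generic_mii_ioctl() services the standard MII requests
 * (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) through bp->mii_if, whose
 * mdio_read/mdio_write hooks are wired up to b44_mii_read/b44_mii_write
 * in b44_init_one() below, so userspace tools such as mii-tool work
 * against this driver unmodified.
 */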
2058
2059/* Read 128 bytes of EEPROM. */
2060static int b44_read_eeprom(struct b44 *bp, u8 *data)
2061{
2062 long i;
2063	__le16 *ptr = (__le16 *) data;
2064
2065 for (i = 0; i < 128; i += 2)
2066		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2067
2068 return 0;
2069}
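/* Editor's note: each readw() returns the 16-bit word already converted
 * to CPU byte order; storing it back through cpu_to_le16() therefore
 * reproduces the device's little-endian byte layout in data[] exactly,
 * regardless of host endianness. The "EEPROM" here is the SPROM image
 * that appears at offset 4096 of the chip's register window.
 */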
2070
2071static int __devinit b44_get_invariants(struct b44 *bp)
2072{
2073 u8 eeprom[128];
2074 int err;
2075
2076 err = b44_read_eeprom(bp, &eeprom[0]);
2077 if (err)
2078 goto out;
2079
2080 bp->dev->dev_addr[0] = eeprom[79];
2081 bp->dev->dev_addr[1] = eeprom[78];
2082 bp->dev->dev_addr[2] = eeprom[81];
2083 bp->dev->dev_addr[3] = eeprom[80];
2084 bp->dev->dev_addr[4] = eeprom[83];
2085 bp->dev->dev_addr[5] = eeprom[82];
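	/* Editor's note: the pairwise swap above (79/78, 81/80, 83/82)
	 * undoes the little-endian word storage -- the MAC address sits
	 * in SPROM bytes 78..83 as three 16-bit words, so each word's
	 * bytes must be exchanged to recover network byte order.
	 */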
2086
2087	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2088 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2089 return -EINVAL;
2090 }
2091
2092	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2093
2094 bp->phy_addr = eeprom[90] & 0x1f;
2095
2096 /* With this, plus the rx_header prepended to the data by the
2097 * hardware, we'll land the ethernet header on a 2-byte boundary.
2098 */
2099 bp->rx_offset = 30;
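	/* Worked example (editor's sketch, assuming the 28-byte rx_header
	 * defined in b44.h): packet data starts at 30 = 28 + 2, the
	 * 14-byte Ethernet header then occupies bytes 30..43, and the IP
	 * header begins at byte 44 -- a 4-byte boundary, which is what
	 * the 2-byte offset above is buying.
	 */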
2100
2101 bp->imask = IMASK_DEF;
2102
2103 bp->core_unit = ssb_core_unit(bp);
2104 bp->dma_offset = SB_PCI_DMA;
2105
2106	/* XXX - really required?
2107 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2108 */
2109
2110 if (ssb_get_core_rev(bp) >= 7)
2111 bp->flags |= B44_FLAG_B0_ANDLATER;
2112
2113out:
2114 return err;
2115}
2116
2117static int __devinit b44_init_one(struct pci_dev *pdev,
2118 const struct pci_device_id *ent)
2119{
2120 static int b44_version_printed = 0;
2121 unsigned long b44reg_base, b44reg_len;
2122 struct net_device *dev;
2123 struct b44 *bp;
2124 int err, i;
2125
2126 if (b44_version_printed++ == 0)
2127 printk(KERN_INFO "%s", version);
2128
2129 err = pci_enable_device(pdev);
2130 if (err) {
2131		dev_err(&pdev->dev, "Cannot enable PCI device, "
2132 "aborting.\n");
2133 return err;
2134 }
2135
2136 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2137		dev_err(&pdev->dev,
2138			"Cannot find proper PCI device "
2139 "base address, aborting.\n");
2140 err = -ENODEV;
2141 goto err_out_disable_pdev;
2142 }
2143
2144 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2145 if (err) {
2146		dev_err(&pdev->dev,
2147			"Cannot obtain PCI resources, aborting.\n");
2148 goto err_out_disable_pdev;
2149 }
2150
2151 pci_set_master(pdev);
2152
2153	err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2154	if (err) {
2155		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2156 goto err_out_free_res;
2157 }
2158
2159	err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2160	if (err) {
2161		dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2162		goto err_out_free_res;
2163 }
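	/* Editor's note: DMA_30BIT_MASK (0x3fffffff) is applied to both
	 * the streaming and coherent masks because the 4400's DMA engine
	 * can only address the low 1 GB of the bus; the rx/tx paths cope
	 * with out-of-range buffers elsewhere in this file by retrying
	 * the allocation with GFP_DMA.
	 */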
2164
2165 b44reg_base = pci_resource_start(pdev, 0);
2166 b44reg_len = pci_resource_len(pdev, 0);
2167
2168 dev = alloc_etherdev(sizeof(*bp));
2169 if (!dev) {
2170		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2171 err = -ENOMEM;
2172 goto err_out_free_res;
2173 }
2174
2175 SET_MODULE_OWNER(dev);
2176	SET_NETDEV_DEV(dev, &pdev->dev);
2177
2178 /* No interesting netdevice features in this card... */
2179 dev->features |= 0;
2180
2181 bp = netdev_priv(dev);
2182 bp->pdev = pdev;
2183 bp->dev = dev;
2184
2185 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2186
2187 spin_lock_init(&bp->lock);
2188
2189 bp->regs = ioremap(b44reg_base, b44reg_len);
2190 if (bp->regs == 0UL) {
2191		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2192 err = -ENOMEM;
2193 goto err_out_free_dev;
2194 }
2195
2196 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2197 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2198
2199 dev->open = b44_open;
2200 dev->stop = b44_close;
2201 dev->hard_start_xmit = b44_start_xmit;
2202 dev->get_stats = b44_get_stats;
2203 dev->set_multicast_list = b44_set_rx_mode;
2204 dev->set_mac_address = b44_set_mac_addr;
2205 dev->do_ioctl = b44_ioctl;
2206 dev->tx_timeout = b44_tx_timeout;
2207 dev->poll = b44_poll;
2208 dev->weight = 64;
2209 dev->watchdog_timeo = B44_TX_TIMEOUT;
2210#ifdef CONFIG_NET_POLL_CONTROLLER
2211 dev->poll_controller = b44_poll_controller;
2212#endif
2213 dev->change_mtu = b44_change_mtu;
2214 dev->irq = pdev->irq;
2215 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2216
2217 netif_carrier_off(dev);
2218
2219 err = b44_get_invariants(bp);
2220 if (err) {
2221		dev_err(&pdev->dev,
2222			"Problem fetching invariants of chip, aborting.\n");
2223 goto err_out_iounmap;
2224 }
2225
2226 bp->mii_if.dev = dev;
2227 bp->mii_if.mdio_read = b44_mii_read;
2228 bp->mii_if.mdio_write = b44_mii_write;
2229 bp->mii_if.phy_id = bp->phy_addr;
2230 bp->mii_if.phy_id_mask = 0x1f;
2231 bp->mii_if.reg_num_mask = 0x1f;
2232
2233 /* By default, advertise all speed/duplex settings. */
2234 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2235 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2236
2237 /* By default, auto-negotiate PAUSE. */
2238 bp->flags |= B44_FLAG_PAUSE_AUTO;
2239
2240 err = register_netdev(dev);
2241 if (err) {
2242		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2243 goto err_out_iounmap;
2244 }
2245
2246 pci_set_drvdata(pdev, dev);
2247
2248 pci_save_state(bp->pdev);
2249
2250	/* Chip reset provides power to the b44 MAC & PCI cores, which
2251	 * is necessary for MAC register access.
2252	 */
2253 b44_chip_reset(bp);
2254
2255 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2256 for (i = 0; i < 6; i++)
2257 printk("%2.2x%c", dev->dev_addr[i],
2258 i == 5 ? '\n' : ':');
2259
2260 return 0;
2261
2262err_out_iounmap:
2263 iounmap(bp->regs);
2264
2265err_out_free_dev:
2266 free_netdev(dev);
2267
2268err_out_free_res:
2269 pci_release_regions(pdev);
2270
2271err_out_disable_pdev:
2272 pci_disable_device(pdev);
2273 pci_set_drvdata(pdev, NULL);
2274 return err;
2275}
2276
2277static void __devexit b44_remove_one(struct pci_dev *pdev)
2278{
2279 struct net_device *dev = pci_get_drvdata(pdev);
2280	struct b44 *bp = netdev_priv(dev);
2281
2282 unregister_netdev(dev);
2283 iounmap(bp->regs);
2284 free_netdev(dev);
2285 pci_release_regions(pdev);
2286 pci_disable_device(pdev);
2287 pci_set_drvdata(pdev, NULL);
2288}
2289
2290static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2291{
2292 struct net_device *dev = pci_get_drvdata(pdev);
2293 struct b44 *bp = netdev_priv(dev);
2294
2295 if (!netif_running(dev))
2296 return 0;
2297
2298 del_timer_sync(&bp->timer);
2299
2300	spin_lock_irq(&bp->lock);
2301
2302 b44_halt(bp);
2303	netif_carrier_off(bp->dev);
2304 netif_device_detach(bp->dev);
2305 b44_free_rings(bp);
2306
2307 spin_unlock_irq(&bp->lock);
2308
2309 free_irq(dev->irq, dev);
2310	if (bp->flags & B44_FLAG_WOL_ENABLE) {
2311		b44_init_hw(bp, B44_PARTIAL_RESET);
2312 b44_setup_wol(bp);
2313 }
2314	pci_disable_device(pdev);
2315 return 0;
2316}
2317
2318static int b44_resume(struct pci_dev *pdev)
2319{
2320 struct net_device *dev = pci_get_drvdata(pdev);
2321 struct b44 *bp = netdev_priv(dev);
2322	int rc = 0;
2323
2324 pci_restore_state(pdev);
2325 rc = pci_enable_device(pdev);
2326 if (rc) {
2327 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2328 dev->name);
2329 return rc;
2330 }
2331
2332	pci_set_master(pdev);
2333
2334 if (!netif_running(dev))
2335 return 0;
2336
2337 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2338 if (rc) {
2339		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2340 pci_disable_device(pdev);
2341 return rc;
2342 }
2343
2344 spin_lock_irq(&bp->lock);
2345
2346 b44_init_rings(bp);
2347	b44_init_hw(bp, B44_FULL_RESET);
2348 netif_device_attach(bp->dev);
2349 spin_unlock_irq(&bp->lock);
2350
2351 bp->timer.expires = jiffies + HZ;
2352 add_timer(&bp->timer);
2353
2354 b44_enable_ints(bp);
2355	netif_wake_queue(dev);
2356 return 0;
2357}
2358
2359static struct pci_driver b44_driver = {
2360 .name = DRV_MODULE_NAME,
2361 .id_table = b44_pci_tbl,
2362 .probe = b44_init_one,
2363 .remove = __devexit_p(b44_remove_one),
2364 .suspend = b44_suspend,
2365 .resume = b44_resume,
2366};
2367
2368static int __init b44_init(void)
2369{
2370 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2371
2372	/* Set up parameters for syncing RX/TX DMA descriptors */
2373 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2374	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
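	/* Worked example (editor's sketch): with 32-byte cache lines,
	 * dma_desc_align_mask = ~(32 - 1) = 0xffffffe0 and descriptors
	 * are synced in chunks of max(32, sizeof(struct dma_desc)), so a
	 * cache-line flush can never clobber a neighbouring descriptor.
	 */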
2375
2376	return pci_register_driver(&b44_driver);
2377}
2378
2379static void __exit b44_cleanup(void)
2380{
2381 pci_unregister_driver(&b44_driver);
2382}
2383
2384module_init(b44_init);
2385module_exit(b44_cleanup);
2386