b44: packet offset is constant
drivers/net/b44.c

/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME     "b44"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION  "1.01"
#define DRV_MODULE_RELDATE  "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE    \
    (NETIF_MSG_DRV    | \
     NETIF_MSG_PROBE  | \
     NETIF_MSG_LINK   | \
     NETIF_MSG_TIMER  | \
     NETIF_MSG_IFDOWN | \
     NETIF_MSG_IFUP   | \
     NETIF_MSG_RX_ERR | \
     NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT      (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU         60
#define B44_MAX_MTU         1500

#define B44_RX_RING_SIZE        512
#define B44_DEF_RX_RING_PENDING 200
#define B44_RX_RING_BYTES   (sizeof(struct dma_desc) * B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE        512
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES   (sizeof(struct dma_desc) * B44_TX_RING_SIZE)

#define TX_RING_GAP(BP) \
    (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                \
    (((BP)->tx_cons <= (BP)->tx_prod) ?                   \
     (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :   \
     (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))

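/* Note on the ring accounting above: the ring size is a power of two,
 * so NEXT_TX() wraps the producer index with a simple mask, and with
 * tx_pending = 511 on a 512-entry ring one slot always stays unused so
 * that tx_prod == tx_cons unambiguously means "empty".  For example,
 * tx_prod = 510, tx_cons = 5 gives 5 + 511 - 510 = 6 free descriptors.
 */
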
#define RX_PKT_OFFSET   30
#define RX_PKT_BUF_SZ   (1536 + RX_PKT_OFFSET + 64)

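/* Receive buffer layout (see b44_alloc_rx_skb() below): the chip
 * deposits a struct rx_header in the first RX_PKT_OFFSET bytes of each
 * buffer and the packet data right after it, so the address handed to
 * the chip points RX_PKT_OFFSET bytes into the buffer.  The extra 64
 * bytes of RX_PKT_BUF_SZ are slack beyond the largest (1536-byte)
 * frame the chip may write.
 */
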
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH    (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE    0x400
#define B44_PATTERN_SIZE    0x80
#define B44_PMASK_BASE      0x600
#define B44_PMASK_SIZE      0x10
#define B44_MAX_PATTERNS    16
#define B44_ETHIPV6UDP_HLEN 62
#define B44_ETHIPV4UDP_HLEN 42

static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;  /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
    { }  /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

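/* Reset kinds passed to b44_init_hw(): B44_FULL_RESET reprograms
 * everything including the PHY; B44_FULL_RESET_SKIP_PHY skips the PHY
 * reset/setup (used from the error path in b44_poll(), where the link
 * state should be preserved); B44_PARTIAL_RESET programs just enough
 * of the receiver for wake-on-LAN (see b44_close()).
 */
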
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)  # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
    dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                     offset & dma_desc_align_mask,
                                     dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
    dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                  offset & dma_desc_align_mask,
                                  dma_desc_sync_size, dir);
}

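/* The two helpers above exist for the "ring hack" case (see
 * b44_alloc_consistent()): when a descriptor ring could not be
 * obtained from pci_alloc_consistent(), it is kmalloc'ed and
 * streaming-mapped instead, so every descriptor update must be
 * followed by an explicit partial sync of the region holding that
 * descriptor -- hence the offset masking with dma_desc_align_mask and
 * the fixed dma_desc_sync_size.
 */
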
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
    return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
    writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
    unsigned long i;

    for (i = 0; i < timeout; i++) {
        u32 val = br32(bp, reg);

        if (clear && !(val & bit))
            break;
        if (!clear && (val & bit))
            break;
        udelay(10);
    }
    if (i == timeout) {
        printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
               "%lx to %s.\n",
               bp->dev->name,
               bit, reg,
               (clear ? "clear" : "set"));
        return -ENODEV;
    }
    return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all
 * the buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA            0x40000000  /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR 0x18002000  /* Address of PCI core on BCM4400 cards */

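/* The 4400 core sits behind a Sonics backplane-to-PCI bridge whose
 * client-mode window (SB_PCI_DMA) covers only the first 1GB of host
 * memory.  That is why buffer mappings throughout this driver are
 * checked against DMA_30BIT_MASK and redone from GFP_DMA memory when
 * they land too high.
 */
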
static u32 ssb_get_core_rev(struct b44 *bp)
{
    return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
    u32 bar_orig, pci_rev, val;

    pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
    pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
    pci_rev = ssb_get_core_rev(bp);

    val = br32(bp, B44_SBINTVEC);
    val |= cores;
    bw32(bp, B44_SBINTVEC, val);

    val = br32(bp, SSB_PCI_TRANS_2);
    val |= SSB_PCI_PREF | SSB_PCI_BURST;
    bw32(bp, SSB_PCI_TRANS_2, val);

    pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

    return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
    if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
        return;

    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
    b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
    b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
    br32(bp, B44_SBTMSLOW);
    udelay(1);
    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
    br32(bp, B44_SBTMSLOW);
    udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
    u32 val;

    ssb_core_disable(bp);
    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
    br32(bp, B44_SBTMSLOW);
    udelay(1);

    /* Clear SERR if set, this is a hw bug workaround. */
    if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
        bw32(bp, B44_SBTMSHIGH, 0);

    val = br32(bp, B44_SBIMSTATE);
    if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
        bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
    br32(bp, B44_SBTMSLOW);
    udelay(1);

    bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
    br32(bp, B44_SBTMSLOW);
    udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
    u32 val = br32(bp, B44_SBADMATCH0);
    u32 base;

    type = val & SBADMATCH0_TYPE_MASK;
    switch (type) {
    case 0:
        base = val & SBADMATCH0_BS0_MASK;
        break;

    case 1:
        base = val & SBADMATCH0_BS1_MASK;
        break;

    case 2:
    default:
        base = val & SBADMATCH0_BS2_MASK;
        break;
    }
#endif
    return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
    return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
            == SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
    u32 val;

    val  = ((u32) data[2]) << 24;
    val |= ((u32) data[3]) << 16;
    val |= ((u32) data[4]) << 8;
    val |= ((u32) data[5]) << 0;
    bw32(bp, B44_CAM_DATA_LO, val);
    val = (CAM_DATA_HI_VALID |
           (((u32) data[0]) << 8) |
           (((u32) data[1]) << 0));
    bw32(bp, B44_CAM_DATA_HI, val);
    bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
    b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

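/* The CAM (MAC address filter) is written one 6-byte entry at a time:
 * the low register takes the last four octets, the high register the
 * first two plus a valid bit, and the entry is latched by hitting the
 * WRITE bit in the control register together with the entry's index.
 * Entry 0 holds the interface's own address; multicast entries start
 * at index 1 (see __b44_load_mcast()).
 */
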
static inline void __b44_disable_ints(struct b44 *bp)
{
    bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
    __b44_disable_ints(bp);

    /* Flush posted writes. */
    br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
    bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
    int err;

    bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
    bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
    err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
    *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

    return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
    bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
    bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
    return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

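/* The MDIO frame assembled above packs the standard clause-22 fields
 * into B44_MDIO_DATA: start bits, opcode (read/write), PHY address,
 * register address, turnaround bits and, for writes, the 16-bit data.
 * Completion is signalled via the EMAC_INT_MII bit in B44_EMAC_ISTAT,
 * which both routines poll with b44_wait_bit().
 */
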
/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
    u32 val;
    struct b44 *bp = netdev_priv(dev);
    int rc = b44_readphy(bp, location, &val);
    if (rc)
        return 0xffffffff;
    return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                          int val)
{
    struct b44 *bp = netdev_priv(dev);
    b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
    u32 val;
    int err;

    err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
    if (err)
        return err;
    udelay(100);
    err = b44_readphy(bp, MII_BMCR, &val);
    if (!err) {
        if (val & BMCR_RESET) {
            printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                   bp->dev->name);
            err = -ENODEV;
        }
    }

    return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
    u32 val;

    bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
    bp->flags |= pause_flags;

    val = br32(bp, B44_RXCONFIG);
    if (pause_flags & B44_FLAG_RX_PAUSE)
        val |= RXCONFIG_FLOW;
    else
        val &= ~RXCONFIG_FLOW;
    bw32(bp, B44_RXCONFIG, val);

    val = br32(bp, B44_MAC_FLOW);
    if (pause_flags & B44_FLAG_TX_PAUSE)
        val |= (MAC_FLOW_PAUSE_ENAB |
                (0xc0 & MAC_FLOW_RX_HI_WATER));
    else
        val &= ~MAC_FLOW_PAUSE_ENAB;
    bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
    u32 pause_enab = 0;

    /* The driver supports only rx pause by default because
       the b44 mac tx pause mechanism generates excessive
       pause frames.
       Use ethtool to turn on b44 tx pause if necessary.
     */
    if ((local & ADVERTISE_PAUSE_CAP) &&
        (local & ADVERTISE_PAUSE_ASYM)) {
        if ((remote & LPA_PAUSE_ASYM) &&
            !(remote & LPA_PAUSE_CAP))
            pause_enab |= B44_FLAG_RX_PAUSE;
    }

    __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
    u32 val;
    int err;

    if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
        goto out;
    if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                            val & MII_ALEDCTRL_ALLMSK)) != 0)
        goto out;
    if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
        goto out;
    if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                            val | MII_TLEDCTRL_ENABLE)) != 0)
        goto out;

    if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
        u32 adv = ADVERTISE_CSMA;

        if (bp->flags & B44_FLAG_ADV_10HALF)
            adv |= ADVERTISE_10HALF;
        if (bp->flags & B44_FLAG_ADV_10FULL)
            adv |= ADVERTISE_10FULL;
        if (bp->flags & B44_FLAG_ADV_100HALF)
            adv |= ADVERTISE_100HALF;
        if (bp->flags & B44_FLAG_ADV_100FULL)
            adv |= ADVERTISE_100FULL;

        if (bp->flags & B44_FLAG_PAUSE_AUTO)
            adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

        if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
            goto out;
        if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                               BMCR_ANRESTART))) != 0)
            goto out;
    } else {
        u32 bmcr;

        if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
            goto out;
        bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
        if (bp->flags & B44_FLAG_100_BASE_T)
            bmcr |= BMCR_SPEED100;
        if (bp->flags & B44_FLAG_FULL_DUPLEX)
            bmcr |= BMCR_FULLDPLX;
        if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
            goto out;

        /* Since we will not be negotiating there is no safe way
         * to determine if the link partner supports flow control
         * or not.  So just disable it completely in this case.
         */
        b44_set_flow_ctrl(bp, 0, 0);
    }

out:
    return err;
}

static void b44_stats_update(struct b44 *bp)
{
    unsigned long reg;
    u32 *val;

    val = &bp->hw_stats.tx_good_octets;
    for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
        *val++ += br32(bp, reg);
    }

    /* Pad */
    reg += 8*4UL;

    for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
        *val++ += br32(bp, reg);
    }
}

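/* The MIB counters are configured clear-on-read (MIB_CTRL_CLR_ON_READ,
 * see b44_chip_reset() and b44_init_hw()), so b44_stats_update() can
 * simply add each register's current value into the matching u32 in
 * bp->hw_stats.  The loop relies on the register block and the
 * hw_stats fields being laid out in the same order.
 */
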
static void b44_link_report(struct b44 *bp)
{
    if (!netif_carrier_ok(bp->dev)) {
        printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
    } else {
        printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
               bp->dev->name,
               (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
               (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

        printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
               "%s for RX.\n",
               bp->dev->name,
               (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
               (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
    }
}

static void b44_check_phy(struct b44 *bp)
{
    u32 bmsr, aux;

    if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
        !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
        (bmsr != 0xffff)) {
        if (aux & MII_AUXCTRL_SPEED)
            bp->flags |= B44_FLAG_100_BASE_T;
        else
            bp->flags &= ~B44_FLAG_100_BASE_T;
        if (aux & MII_AUXCTRL_DUPLEX)
            bp->flags |= B44_FLAG_FULL_DUPLEX;
        else
            bp->flags &= ~B44_FLAG_FULL_DUPLEX;

        if (!netif_carrier_ok(bp->dev) &&
            (bmsr & BMSR_LSTATUS)) {
            u32 val = br32(bp, B44_TX_CTRL);
            u32 local_adv, remote_adv;

            if (bp->flags & B44_FLAG_FULL_DUPLEX)
                val |= TX_CTRL_DUPLEX;
            else
                val &= ~TX_CTRL_DUPLEX;
            bw32(bp, B44_TX_CTRL, val);

            if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                !b44_readphy(bp, MII_LPA, &remote_adv))
                b44_set_flow_ctrl(bp, local_adv, remote_adv);

            /* Link now up */
            netif_carrier_on(bp->dev);
            b44_link_report(bp);
        } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
            /* Link now down */
            netif_carrier_off(bp->dev);
            b44_link_report(bp);
        }

        if (bmsr & BMSR_RFAULT)
            printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                   bp->dev->name);
        if (bmsr & BMSR_JCD)
            printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                   bp->dev->name);
    }
}

static void b44_timer(unsigned long __opaque)
{
    struct b44 *bp = (struct b44 *) __opaque;

    spin_lock_irq(&bp->lock);

    b44_check_phy(bp);

    b44_stats_update(bp);

    spin_unlock_irq(&bp->lock);

    mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
    u32 cur, cons;

    cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
    cur /= sizeof(struct dma_desc);

    /* XXX needs updating when NETIF_F_SG is supported */
    for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
        struct ring_info *rp = &bp->tx_buffers[cons];
        struct sk_buff *skb = rp->skb;

        BUG_ON(skb == NULL);

        pci_unmap_single(bp->pdev,
                         pci_unmap_addr(rp, mapping),
                         skb->len,
                         PCI_DMA_TODEVICE);
        rp->skb = NULL;
        dev_kfree_skb_irq(skb);
    }

    bp->tx_cons = cons;
    if (netif_queue_stopped(bp->dev) &&
        TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
        netif_wake_queue(bp->dev);

    bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
    struct dma_desc *dp;
    struct ring_info *src_map, *map;
    struct rx_header *rh;
    struct sk_buff *skb;
    dma_addr_t mapping;
    int dest_idx;
    u32 ctrl;

    src_map = NULL;
    if (src_idx >= 0)
        src_map = &bp->rx_buffers[src_idx];
    dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
    map = &bp->rx_buffers[dest_idx];
    skb = dev_alloc_skb(RX_PKT_BUF_SZ);
    if (skb == NULL)
        return -ENOMEM;

    mapping = pci_map_single(bp->pdev, skb->data,
                             RX_PKT_BUF_SZ,
                             PCI_DMA_FROMDEVICE);

    /* Hardware bug work-around, the chip is unable to do PCI DMA
       to/from anything above 1GB :-( */
    if (dma_mapping_error(mapping) ||
        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
        /* Sigh... */
        if (!dma_mapping_error(mapping))
            pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(skb);
        skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
        if (skb == NULL)
            return -ENOMEM;
        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(mapping) ||
            mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
            if (!dma_mapping_error(mapping))
                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
            dev_kfree_skb_any(skb);
            return -ENOMEM;
        }
    }

    skb->dev = bp->dev;
    rh = (struct rx_header *) skb->data;
    skb_reserve(skb, RX_PKT_OFFSET);

    rh->len = 0;
    rh->flags = 0;

    map->skb = skb;
    pci_unmap_addr_set(map, mapping, mapping);

    if (src_map != NULL)
        src_map->skb = NULL;

    ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
    if (dest_idx == (B44_RX_RING_SIZE - 1))
        ctrl |= DESC_CTRL_EOT;

    dp = &bp->rx_ring[dest_idx];
    dp->ctrl = cpu_to_le32(ctrl);
    dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

    if (bp->flags & B44_FLAG_RX_RING_HACK)
        b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                     dest_idx * sizeof(dp),
                                     DMA_BIDIRECTIONAL);

    return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
    struct dma_desc *src_desc, *dest_desc;
    struct ring_info *src_map, *dest_map;
    struct rx_header *rh;
    int dest_idx;
    __le32 ctrl;

    dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
    dest_desc = &bp->rx_ring[dest_idx];
    dest_map = &bp->rx_buffers[dest_idx];
    src_desc = &bp->rx_ring[src_idx];
    src_map = &bp->rx_buffers[src_idx];

    dest_map->skb = src_map->skb;
    rh = (struct rx_header *) src_map->skb->data;
    rh->len = 0;
    rh->flags = 0;
    pci_unmap_addr_set(dest_map, mapping,
                       pci_unmap_addr(src_map, mapping));

    if (bp->flags & B44_FLAG_RX_RING_HACK)
        b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                  src_idx * sizeof(src_desc),
                                  DMA_BIDIRECTIONAL);

    ctrl = src_desc->ctrl;
    if (dest_idx == (B44_RX_RING_SIZE - 1))
        ctrl |= cpu_to_le32(DESC_CTRL_EOT);
    else
        ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

    dest_desc->ctrl = ctrl;
    dest_desc->addr = src_desc->addr;

    src_map->skb = NULL;

    if (bp->flags & B44_FLAG_RX_RING_HACK)
        b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                     dest_idx * sizeof(dest_desc),
                                     DMA_BIDIRECTIONAL);

    pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
                                   RX_PKT_BUF_SZ,
                                   PCI_DMA_FROMDEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
    int received;
    u32 cons, prod;

    received = 0;
    prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
    prod /= sizeof(struct dma_desc);
    cons = bp->rx_cons;

    while (cons != prod && budget > 0) {
        struct ring_info *rp = &bp->rx_buffers[cons];
        struct sk_buff *skb = rp->skb;
        dma_addr_t map = pci_unmap_addr(rp, mapping);
        struct rx_header *rh;
        u16 len;

        pci_dma_sync_single_for_cpu(bp->pdev, map,
                                    RX_PKT_BUF_SZ,
                                    PCI_DMA_FROMDEVICE);
        rh = (struct rx_header *) skb->data;
        len = le16_to_cpu(rh->len);
        if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
            (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
        drop_it:
            b44_recycle_rx(bp, cons, bp->rx_prod);
        drop_it_no_recycle:
            bp->stats.rx_dropped++;
            goto next_pkt;
        }

        if (len == 0) {
            int i = 0;

            do {
                udelay(2);
                barrier();
                len = le16_to_cpu(rh->len);
            } while (len == 0 && i++ < 5);
            if (len == 0)
                goto drop_it;
        }

        /* Omit CRC. */
        len -= 4;

        if (len > RX_COPY_THRESHOLD) {
            int skb_size;
            skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
            if (skb_size < 0)
                goto drop_it;
            pci_unmap_single(bp->pdev, map,
                             skb_size, PCI_DMA_FROMDEVICE);
            /* Leave out rx_header */
            skb_put(skb, len + RX_PKT_OFFSET);
            skb_pull(skb, RX_PKT_OFFSET);
        } else {
            struct sk_buff *copy_skb;

            b44_recycle_rx(bp, cons, bp->rx_prod);
            copy_skb = dev_alloc_skb(len + 2);
            if (copy_skb == NULL)
                goto drop_it_no_recycle;

            skb_reserve(copy_skb, 2);
            skb_put(copy_skb, len);
            /* DMA sync done above, copy just the actual packet */
            skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                             copy_skb->data, len);
            skb = copy_skb;
        }
        skb->ip_summed = CHECKSUM_NONE;
        skb->protocol = eth_type_trans(skb, bp->dev);
        netif_receive_skb(skb);
        bp->dev->last_rx = jiffies;
        received++;
        budget--;
    next_pkt:
        bp->rx_prod = (bp->rx_prod + 1) &
            (B44_RX_RING_SIZE - 1);
        cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
    }

    bp->rx_cons = cons;
    bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

    return received;
}

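/* Receive processing above follows the copybreak pattern: frames
 * longer than RX_COPY_THRESHOLD are passed up in the original buffer
 * and a fresh skb is allocated into the ring, while short frames are
 * copied into a small skb and the big buffer is recycled in place.
 * b44_poll() below uses the old ->poll(dev, budget) NAPI interface,
 * where both *budget and dev->quota are decremented by the work done.
 */
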
static int b44_poll(struct net_device *netdev, int *budget)
{
    struct b44 *bp = netdev_priv(netdev);
    int done;

    spin_lock_irq(&bp->lock);

    if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
        /* spin_lock(&bp->tx_lock); */
        b44_tx(bp);
        /* spin_unlock(&bp->tx_lock); */
    }
    spin_unlock_irq(&bp->lock);

    done = 1;
    if (bp->istat & ISTAT_RX) {
        int orig_budget = *budget;
        int work_done;

        if (orig_budget > netdev->quota)
            orig_budget = netdev->quota;

        work_done = b44_rx(bp, orig_budget);

        *budget -= work_done;
        netdev->quota -= work_done;

        if (work_done >= orig_budget)
            done = 0;
    }

    if (bp->istat & ISTAT_ERRORS) {
        unsigned long flags;

        spin_lock_irqsave(&bp->lock, flags);
        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        netif_wake_queue(bp->dev);
        spin_unlock_irqrestore(&bp->lock, flags);
        done = 1;
    }

    if (done) {
        netif_rx_complete(netdev);
        b44_enable_ints(bp);
    }

    return (done ? 0 : 1);
}

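/* Interrupt handling: the handler below samples ISTAT against IMASK
 * (the mask gates interrupt delivery but does not clear the status
 * bits).  When there is real work it disables further interrupts and
 * schedules the poll routine; the final write+readback of B44_ISTAT
 * both acks the sources and posts the earlier mask write.
 */
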
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct b44 *bp = netdev_priv(dev);
    u32 istat, imask;
    int handled = 0;

    spin_lock(&bp->lock);

    istat = br32(bp, B44_ISTAT);
    imask = br32(bp, B44_IMASK);

    /* The interrupt mask register controls which interrupt bits
     * will actually raise an interrupt to the CPU when set by
     * hw/firmware, but doesn't mask off the bits.
     */
    istat &= imask;
    if (istat) {
        handled = 1;

        if (unlikely(!netif_running(dev))) {
            printk(KERN_INFO "%s: late interrupt.\n", dev->name);
            goto irq_ack;
        }

        if (netif_rx_schedule_prep(dev)) {
            /* NOTE: These writes are posted by the readback of
             * the ISTAT register below.
             */
            bp->istat = istat;
            __b44_disable_ints(bp);
            __netif_rx_schedule(dev);
        } else {
            printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                   dev->name);
        }

irq_ack:
        bw32(bp, B44_ISTAT, istat);
        br32(bp, B44_ISTAT);
    }
    spin_unlock(&bp->lock);
    return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);

    printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
           dev->name);

    spin_lock_irq(&bp->lock);

    b44_halt(bp);
    b44_init_rings(bp);
    b44_init_hw(bp, B44_FULL_RESET);

    spin_unlock_irq(&bp->lock);

    b44_enable_ints(bp);

    netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    int rc = NETDEV_TX_OK;
    dma_addr_t mapping;
    u32 len, entry, ctrl;

    len = skb->len;
    spin_lock_irq(&bp->lock);

    /* This is a hard error, log it. */
    if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
        netif_stop_queue(dev);
        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
               dev->name);
        goto err_out;
    }

    mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
    if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
        struct sk_buff *bounce_skb;

        /* Chip can't handle DMA to/from >1GB, use bounce buffer */
        if (!dma_mapping_error(mapping))
            pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

        bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
        if (!bounce_skb)
            goto err_out;

        mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                 len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
            if (!dma_mapping_error(mapping))
                pci_unmap_single(bp->pdev, mapping,
                                 len, PCI_DMA_TODEVICE);
            dev_kfree_skb_any(bounce_skb);
            goto err_out;
        }

        skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
        dev_kfree_skb_any(skb);
        skb = bounce_skb;
    }

    entry = bp->tx_prod;
    bp->tx_buffers[entry].skb = skb;
    pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

    ctrl = (len & DESC_CTRL_LEN);
    ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
    if (entry == (B44_TX_RING_SIZE - 1))
        ctrl |= DESC_CTRL_EOT;

    bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
    bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

    if (bp->flags & B44_FLAG_TX_RING_HACK)
        b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                     entry * sizeof(bp->tx_ring[0]),
                                     DMA_TO_DEVICE);

    entry = NEXT_TX(entry);

    bp->tx_prod = entry;

    wmb();

    bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
    if (bp->flags & B44_FLAG_BUGGY_TXPTR)
        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
    if (bp->flags & B44_FLAG_REORDER_BUG)
        br32(bp, B44_DMATX_PTR);

    if (TX_BUFFS_AVAIL(bp) < 1)
        netif_stop_queue(dev);

    dev->trans_start = jiffies;

out_unlock:
    spin_unlock_irq(&bp->lock);

    return rc;

err_out:
    rc = NETDEV_TX_BUSY;
    goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
    struct b44 *bp = netdev_priv(dev);

    if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
        return -EINVAL;

    if (!netif_running(dev)) {
        /* We'll just catch it later when the
         * device is up'd.
         */
        dev->mtu = new_mtu;
        return 0;
    }

    spin_lock_irq(&bp->lock);
    b44_halt(bp);
    dev->mtu = new_mtu;
    b44_init_rings(bp);
    b44_init_hw(bp, B44_FULL_RESET);
    spin_unlock_irq(&bp->lock);

    b44_enable_ints(bp);

    return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
    struct ring_info *rp;
    int i;

    for (i = 0; i < B44_RX_RING_SIZE; i++) {
        rp = &bp->rx_buffers[i];

        if (rp->skb == NULL)
            continue;
        pci_unmap_single(bp->pdev,
                         pci_unmap_addr(rp, mapping),
                         RX_PKT_BUF_SZ,
                         PCI_DMA_FROMDEVICE);
        dev_kfree_skb_any(rp->skb);
        rp->skb = NULL;
    }

    /* XXX needs changes once NETIF_F_SG is set... */
    for (i = 0; i < B44_TX_RING_SIZE; i++) {
        rp = &bp->tx_buffers[i];

        if (rp->skb == NULL)
            continue;
        pci_unmap_single(bp->pdev,
                         pci_unmap_addr(rp, mapping),
                         rp->skb->len,
                         PCI_DMA_TODEVICE);
        dev_kfree_skb_any(rp->skb);
        rp->skb = NULL;
    }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
    int i;

    b44_free_rings(bp);

    memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
    memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

    if (bp->flags & B44_FLAG_RX_RING_HACK)
        dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                   DMA_TABLE_BYTES,
                                   PCI_DMA_BIDIRECTIONAL);

    if (bp->flags & B44_FLAG_TX_RING_HACK)
        dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                   DMA_TABLE_BYTES,
                                   PCI_DMA_TODEVICE);

    for (i = 0; i < bp->rx_pending; i++) {
        if (b44_alloc_rx_skb(bp, -1, i) < 0)
            break;
    }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
    kfree(bp->rx_buffers);
    bp->rx_buffers = NULL;
    kfree(bp->tx_buffers);
    bp->tx_buffers = NULL;
    if (bp->rx_ring) {
        if (bp->flags & B44_FLAG_RX_RING_HACK) {
            dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                             DMA_TABLE_BYTES,
                             DMA_BIDIRECTIONAL);
            kfree(bp->rx_ring);
        } else
            pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                bp->rx_ring, bp->rx_ring_dma);
        bp->rx_ring = NULL;
        bp->flags &= ~B44_FLAG_RX_RING_HACK;
    }
    if (bp->tx_ring) {
        if (bp->flags & B44_FLAG_TX_RING_HACK) {
            dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                             DMA_TABLE_BYTES,
                             DMA_TO_DEVICE);
            kfree(bp->tx_ring);
        } else
            pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                bp->tx_ring, bp->tx_ring_dma);
        bp->tx_ring = NULL;
        bp->flags &= ~B44_FLAG_TX_RING_HACK;
    }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
    int size;

    size = B44_RX_RING_SIZE * sizeof(struct ring_info);
    bp->rx_buffers = kzalloc(size, GFP_KERNEL);
    if (!bp->rx_buffers)
        goto out_err;

    size = B44_TX_RING_SIZE * sizeof(struct ring_info);
    bp->tx_buffers = kzalloc(size, GFP_KERNEL);
    if (!bp->tx_buffers)
        goto out_err;

    size = DMA_TABLE_BYTES;
    bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
    if (!bp->rx_ring) {
        /* Allocation may have failed due to pci_alloc_consistent
           insisting on use of GFP_DMA, which is more restrictive
           than necessary... */
        struct dma_desc *rx_ring;
        dma_addr_t rx_ring_dma;

        rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rx_ring)
            goto out_err;

        rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                     DMA_TABLE_BYTES,
                                     DMA_BIDIRECTIONAL);

        if (dma_mapping_error(rx_ring_dma) ||
            rx_ring_dma + size > DMA_30BIT_MASK) {
            kfree(rx_ring);
            goto out_err;
        }

        bp->rx_ring = rx_ring;
        bp->rx_ring_dma = rx_ring_dma;
        bp->flags |= B44_FLAG_RX_RING_HACK;
    }

    bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
    if (!bp->tx_ring) {
        /* Allocation may have failed due to pci_alloc_consistent
           insisting on use of GFP_DMA, which is more restrictive
           than necessary... */
        struct dma_desc *tx_ring;
        dma_addr_t tx_ring_dma;

        tx_ring = kzalloc(size, GFP_KERNEL);
        if (!tx_ring)
            goto out_err;

        tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                     DMA_TABLE_BYTES,
                                     DMA_TO_DEVICE);

        if (dma_mapping_error(tx_ring_dma) ||
            tx_ring_dma + size > DMA_30BIT_MASK) {
            kfree(tx_ring);
            goto out_err;
        }

        bp->tx_ring = tx_ring;
        bp->tx_ring_dma = tx_ring_dma;
        bp->flags |= B44_FLAG_TX_RING_HACK;
    }

    return 0;

out_err:
    b44_free_consistent(bp);
    return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
    unsigned long reg;

    bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
    for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
        br32(bp, reg);
    for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
        br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
    if (ssb_is_core_up(bp)) {
        bw32(bp, B44_RCV_LAZY, 0);
        bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
        b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
        bw32(bp, B44_DMATX_CTRL, 0);
        bp->tx_prod = bp->tx_cons = 0;
        if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
            b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                         100, 0);
        }
        bw32(bp, B44_DMARX_CTRL, 0);
        bp->rx_prod = bp->rx_cons = 0;
    } else {
        ssb_pci_setup(bp, (bp->core_unit == 0 ?
                           SBINTVEC_ENET0 :
                           SBINTVEC_ENET1));
    }

    ssb_core_reset(bp);

    b44_clear_stats(bp);

    /* Make PHY accessible. */
    bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
    br32(bp, B44_MDIO_CTRL);

    if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
        bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
        br32(bp, B44_ENET_CTRL);
        bp->flags &= ~B44_FLAG_INTERNAL_PHY;
    } else {
        u32 val = br32(bp, B44_DEVCTRL);

        if (val & DEVCTRL_EPR) {
            bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
            br32(bp, B44_DEVCTRL);
            udelay(100);
        }
        bp->flags |= B44_FLAG_INTERNAL_PHY;
    }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
    b44_disable_ints(bp);
    b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
    bw32(bp, B44_CAM_CTRL, 0);
    if (!(bp->dev->flags & IFF_PROMISC)) {
        u32 val;

        __b44_cam_write(bp, bp->dev->dev_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
    }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
    struct b44 *bp = netdev_priv(dev);
    struct sockaddr *addr = p;

    if (netif_running(dev))
        return -EBUSY;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EINVAL;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

    spin_lock_irq(&bp->lock);
    __b44_set_mac_addr(bp);
    spin_unlock_irq(&bp->lock);

    return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
    u32 val;

    b44_chip_reset(bp);
    if (reset_kind == B44_FULL_RESET) {
        b44_phy_reset(bp);
        b44_setup_phy(bp);
    }

    /* Enable CRC32, set proper LED modes and power on PHY */
    bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
    bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

    /* This sets the MAC address too. */
    __b44_set_rx_mode(bp->dev);

    /* MTU + eth header + possible VLAN tag + struct rx_header */
    bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
    bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

    bw32(bp, B44_TX_WMARK, 56);  /* XXX magic */
    if (reset_kind == B44_PARTIAL_RESET) {
        bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
    } else {
        bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
        bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
        bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

        bw32(bp, B44_DMARX_PTR, bp->rx_pending);
        bp->rx_prod = bp->rx_pending;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
    }

    val = br32(bp, B44_ENET_CTRL);
    bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    int err;

    err = b44_alloc_consistent(bp);
    if (err)
        goto out;

    b44_init_rings(bp);
    b44_init_hw(bp, B44_FULL_RESET);

    b44_check_phy(bp);

    err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
    if (unlikely(err < 0)) {
        b44_chip_reset(bp);
        b44_free_rings(bp);
        b44_free_consistent(bp);
        goto out;
    }

    init_timer(&bp->timer);
    bp->timer.expires = jiffies + HZ;
    bp->timer.data = (unsigned long) bp;
    bp->timer.function = b44_timer;
    add_timer(&bp->timer);

    b44_enable_ints(bp);
    netif_start_queue(dev);
out:
    return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
    u32 val32, val32_2, val32_3, val32_4, val32_5;
    u16 val16;

    pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
    printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
    disable_irq(dev->irq);
    b44_interrupt(dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
    u32 i;
    u32 *pattern = (u32 *) pp;

    for (i = 0; i < bytes; i += sizeof(u32)) {
        bw32(bp, B44_FILT_ADDR, table_offset + i);
        bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
    }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
    int magicsync = 6;
    int k, j, len = offset;
    int ethaddr_bytes = ETH_ALEN;

    memset(ppattern + offset, 0xff, magicsync);
    for (j = 0; j < magicsync; j++)
        set_bit(len++, (unsigned long *) pmask);

    for (j = 0; j < B44_MAX_PATTERNS; j++) {
        if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
            ethaddr_bytes = ETH_ALEN;
        else
            ethaddr_bytes = B44_PATTERN_SIZE - len;
        if (ethaddr_bytes <= 0)
            break;
        for (k = 0; k < ethaddr_bytes; k++) {
            ppattern[offset + magicsync +
                     (j * ETH_ALEN) + k] = macaddr[k];
            len++;
            set_bit(len, (unsigned long *) pmask);
        }
    }
    return len - 1;
}

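/* A magic packet is six 0xff "sync" bytes followed by the station's
 * MAC address repeated sixteen times.  b44_magic_pattern() therefore
 * writes 0xff x 6 at `offset` and then as many whole or partial MAC
 * repetitions as fit in the B44_PATTERN_SIZE window, setting the
 * corresponding bits of the byte mask as it goes.  The return value is
 * the pattern length minus one, which is what the WKUP_LEN register
 * expects (see b44_setup_pseudo_magicp() below).
 */
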
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
    u32 val;
    int plen0, plen1, plen2;
    u8 *pwol_pattern;
    u8 pwol_mask[B44_PMASK_SIZE];

    pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
    if (!pwol_pattern) {
        printk(KERN_ERR PFX "Memory not available for WOL\n");
        return;
    }

    /* Ipv4 magic packet pattern - pattern 0. */
    memset(pwol_pattern, 0, B44_PATTERN_SIZE);
    memset(pwol_mask, 0, B44_PMASK_SIZE);
    plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                              B44_ETHIPV4UDP_HLEN);

    bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
    bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

    /* Raw ethernet II magic packet pattern - pattern 1 */
    memset(pwol_pattern, 0, B44_PATTERN_SIZE);
    memset(pwol_mask, 0, B44_PMASK_SIZE);
    plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                              ETH_HLEN);

    bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                   B44_PATTERN_BASE + B44_PATTERN_SIZE);
    bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                   B44_PMASK_BASE + B44_PMASK_SIZE);

    /* Ipv6 magic packet pattern - pattern 2 */
    memset(pwol_pattern, 0, B44_PATTERN_SIZE);
    memset(pwol_mask, 0, B44_PMASK_SIZE);
    plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                              B44_ETHIPV6UDP_HLEN);

    bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                   B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
    bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                   B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

    kfree(pwol_pattern);

    /* set these pattern's lengths: one less than each real length */
    val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
    bw32(bp, B44_WKUP_LEN, val);

    /* enable wakeup pattern matching */
    val = br32(bp, B44_DEVCTRL);
    bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

static void b44_setup_wol(struct b44 *bp)
{
    u32 val;
    u16 pmval;

    bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

    if (bp->flags & B44_FLAG_B0_ANDLATER) {

        bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

        val = bp->dev->dev_addr[2] << 24 |
              bp->dev->dev_addr[3] << 16 |
              bp->dev->dev_addr[4] << 8 |
              bp->dev->dev_addr[5];
        bw32(bp, B44_ADDR_LO, val);

        val = bp->dev->dev_addr[0] << 8 |
              bp->dev->dev_addr[1];
        bw32(bp, B44_ADDR_HI, val);

        val = br32(bp, B44_DEVCTRL);
        bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

    } else {
        b44_setup_pseudo_magicp(bp);
    }

    val = br32(bp, B44_SBTMSLOW);
    bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);

    pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
    pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
}

static int b44_close(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);

    netif_stop_queue(dev);

    netif_poll_disable(dev);

    del_timer_sync(&bp->timer);

    spin_lock_irq(&bp->lock);

#if 0
    b44_dump_state(bp);
#endif
    b44_halt(bp);
    b44_free_rings(bp);
    netif_carrier_off(dev);

    spin_unlock_irq(&bp->lock);

    free_irq(dev->irq, dev);

    netif_poll_enable(dev);

    if (bp->flags & B44_FLAG_WOL_ENABLE) {
        b44_init_hw(bp, B44_PARTIAL_RESET);
        b44_setup_wol(bp);
    }

    b44_free_consistent(bp);

    return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    struct net_device_stats *nstat = &bp->stats;
    struct b44_hw_stats *hwstat = &bp->hw_stats;

    /* Convert HW stats into netdevice stats. */
    nstat->rx_packets = hwstat->rx_pkts;
    nstat->tx_packets = hwstat->tx_pkts;
    nstat->rx_bytes   = hwstat->rx_octets;
    nstat->tx_bytes   = hwstat->tx_octets;
    nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                         hwstat->tx_oversize_pkts +
                         hwstat->tx_underruns +
                         hwstat->tx_excessive_cols +
                         hwstat->tx_late_cols);
    nstat->multicast  = hwstat->tx_multicast_pkts;
    nstat->collisions = hwstat->tx_total_cols;

    nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                               hwstat->rx_undersize);
    nstat->rx_over_errors   = hwstat->rx_missed_pkts;
    nstat->rx_frame_errors  = hwstat->rx_align_errs;
    nstat->rx_crc_errors    = hwstat->rx_crc_errs;
    nstat->rx_errors        = (hwstat->rx_jabber_pkts +
                               hwstat->rx_oversize_pkts +
                               hwstat->rx_missed_pkts +
                               hwstat->rx_crc_align_errs +
                               hwstat->rx_undersize +
                               hwstat->rx_crc_errs +
                               hwstat->rx_align_errs +
                               hwstat->rx_symbol_errs);

    nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
    /* Carrier lost counter seems to be broken for some devices */
    nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

    return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
    struct dev_mc_list *mclist;
    int i, num_ents;

    num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
    mclist = dev->mc_list;
    for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
        __b44_cam_write(bp, mclist->dmi_addr, i + 1);
    }
    return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    u32 val;

    val = br32(bp, B44_RXCONFIG);
    val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
    if (dev->flags & IFF_PROMISC) {
        val |= RXCONFIG_PROMISC;
        bw32(bp, B44_RXCONFIG, val);
    } else {
        unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
        int i = 1;

        __b44_set_mac_addr(bp);

        if ((dev->flags & IFF_ALLMULTI) ||
            (dev->mc_count > B44_MCAST_TABLE_SIZE))
            val |= RXCONFIG_ALLMULTI;
        else
            i = __b44_load_mcast(bp, dev);

        for (; i < 64; i++)
            __b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
    }
}

static void b44_set_rx_mode(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);

    spin_lock_irq(&bp->lock);
    __b44_set_rx_mode(dev);
    spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
    struct b44 *bp = netdev_priv(dev);
    bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct b44 *bp = netdev_priv(dev);
    struct pci_dev *pci_dev = bp->pdev;

    strcpy(info->driver, DRV_MODULE_NAME);
    strcpy(info->version, DRV_MODULE_VERSION);
    strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
    struct b44 *bp = netdev_priv(dev);
    u32 bmcr;
    int r;

    spin_lock_irq(&bp->lock);
    b44_readphy(bp, MII_BMCR, &bmcr);
    b44_readphy(bp, MII_BMCR, &bmcr);
    r = -EINVAL;
    if (bmcr & BMCR_ANENABLE) {
        b44_writephy(bp, MII_BMCR,
                     bmcr | BMCR_ANRESTART);
        r = 0;
    }
    spin_unlock_irq(&bp->lock);

    return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct b44 *bp = netdev_priv(dev);

    cmd->supported = (SUPPORTED_Autoneg);
    cmd->supported |= (SUPPORTED_100baseT_Half |
                       SUPPORTED_100baseT_Full |
                       SUPPORTED_10baseT_Half |
                       SUPPORTED_10baseT_Full |
                       SUPPORTED_MII);

    cmd->advertising = 0;
    if (bp->flags & B44_FLAG_ADV_10HALF)
        cmd->advertising |= ADVERTISED_10baseT_Half;
    if (bp->flags & B44_FLAG_ADV_10FULL)
        cmd->advertising |= ADVERTISED_10baseT_Full;
    if (bp->flags & B44_FLAG_ADV_100HALF)
        cmd->advertising |= ADVERTISED_100baseT_Half;
    if (bp->flags & B44_FLAG_ADV_100FULL)
        cmd->advertising |= ADVERTISED_100baseT_Full;
    cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
    cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
        SPEED_100 : SPEED_10;
    cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
        DUPLEX_FULL : DUPLEX_HALF;
    cmd->port = 0;
    cmd->phy_address = bp->phy_addr;
    cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
        XCVR_INTERNAL : XCVR_EXTERNAL;
    cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
        AUTONEG_DISABLE : AUTONEG_ENABLE;
    if (cmd->autoneg == AUTONEG_ENABLE)
        cmd->advertising |= ADVERTISED_Autoneg;
    if (!netif_running(dev)) {
        cmd->speed = 0;
        cmd->duplex = 0xff;
    }
    cmd->maxtxpkt = 0;
    cmd->maxrxpkt = 0;
    return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct b44 *bp = netdev_priv(dev);

    /* We do not support gigabit. */
    if (cmd->autoneg == AUTONEG_ENABLE) {
        if (cmd->advertising &
            (ADVERTISED_1000baseT_Half |
             ADVERTISED_1000baseT_Full))
            return -EINVAL;
    } else if ((cmd->speed != SPEED_100 &&
                cmd->speed != SPEED_10) ||
               (cmd->duplex != DUPLEX_HALF &&
                cmd->duplex != DUPLEX_FULL)) {
        return -EINVAL;
    }

    spin_lock_irq(&bp->lock);

    if (cmd->autoneg == AUTONEG_ENABLE) {
        bp->flags &= ~(B44_FLAG_FORCE_LINK |
                       B44_FLAG_100_BASE_T |
                       B44_FLAG_FULL_DUPLEX |
                       B44_FLAG_ADV_10HALF |
                       B44_FLAG_ADV_10FULL |
                       B44_FLAG_ADV_100HALF |
                       B44_FLAG_ADV_100FULL);
        if (cmd->advertising == 0) {
            bp->flags |= (B44_FLAG_ADV_10HALF |
                          B44_FLAG_ADV_10FULL |
                          B44_FLAG_ADV_100HALF |
                          B44_FLAG_ADV_100FULL);
        } else {
            if (cmd->advertising & ADVERTISED_10baseT_Half)
                bp->flags |= B44_FLAG_ADV_10HALF;
            if (cmd->advertising & ADVERTISED_10baseT_Full)
                bp->flags |= B44_FLAG_ADV_10FULL;
            if (cmd->advertising & ADVERTISED_100baseT_Half)
                bp->flags |= B44_FLAG_ADV_100HALF;
            if (cmd->advertising & ADVERTISED_100baseT_Full)
                bp->flags |= B44_FLAG_ADV_100FULL;
        }
    } else {
        bp->flags |= B44_FLAG_FORCE_LINK;
        bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
        if (cmd->speed == SPEED_100)
            bp->flags |= B44_FLAG_100_BASE_T;
        if (cmd->duplex == DUPLEX_FULL)
            bp->flags |= B44_FLAG_FULL_DUPLEX;
    }

    if (netif_running(dev))
        b44_setup_phy(bp);

    spin_unlock_irq(&bp->lock);

    return 0;
}

static void b44_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering)
{
    struct b44 *bp = netdev_priv(dev);

    ering->rx_max_pending = B44_RX_RING_SIZE - 1;
    ering->rx_pending = bp->rx_pending;

    /* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ering)
{
    struct b44 *bp = netdev_priv(dev);

    if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
        (ering->rx_mini_pending != 0) ||
        (ering->rx_jumbo_pending != 0) ||
        (ering->tx_pending > B44_TX_RING_SIZE - 1))
        return -EINVAL;

    spin_lock_irq(&bp->lock);

    bp->rx_pending = ering->rx_pending;
    bp->tx_pending = ering->tx_pending;

    b44_halt(bp);
    b44_init_rings(bp);
    b44_init_hw(bp, B44_FULL_RESET);
    netif_wake_queue(bp->dev);
    spin_unlock_irq(&bp->lock);

    b44_enable_ints(bp);

    return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *epause)
{
    struct b44 *bp = netdev_priv(dev);

    epause->autoneg =
        (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
    epause->rx_pause =
        (bp->flags & B44_FLAG_RX_PAUSE) != 0;
    epause->tx_pause =
        (bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

1932static int b44_set_pauseparam(struct net_device *dev,
1933 struct ethtool_pauseparam *epause)
1934{
1935 struct b44 *bp = netdev_priv(dev);
1936
1937 spin_lock_irq(&bp->lock);
1938 if (epause->autoneg)
1939 bp->flags |= B44_FLAG_PAUSE_AUTO;
1940 else
1941 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1942 if (epause->rx_pause)
1943 bp->flags |= B44_FLAG_RX_PAUSE;
1944 else
1945 bp->flags &= ~B44_FLAG_RX_PAUSE;
1946 if (epause->tx_pause)
1947 bp->flags |= B44_FLAG_TX_PAUSE;
1948 else
1949 bp->flags &= ~B44_FLAG_TX_PAUSE;
1950 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1951 b44_halt(bp);
1952 b44_init_rings(bp);
5fc7d61a 1953 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1954 } else {
1955 __b44_set_flow_ctrl(bp, bp->flags);
1956 }
1957 spin_unlock_irq(&bp->lock);
1958
1959 b44_enable_ints(bp);
1960
1961 return 0;
1962}
1963
1964static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1965{
1966 switch (stringset) {
1967 case ETH_SS_STATS:
1968 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1969 break;
1970 }
1971}
1972
1973static int b44_get_stats_count(struct net_device *dev)
1974{
1975 return ARRAY_SIZE(b44_gstrings);
1976}
1977
1978static void b44_get_ethtool_stats(struct net_device *dev,
1979 struct ethtool_stats *stats, u64 *data)
1980{
1981 struct b44 *bp = netdev_priv(dev);
1982 u32 *val = &bp->hw_stats.tx_good_octets;
1983 u32 i;
1984
1985 spin_lock_irq(&bp->lock);
1986
1987 b44_stats_update(bp);
1988
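/* This copy loop assumes the counters in bp->hw_stats are contiguous
 * u32s laid out in the same order as b44_gstrings[].
 */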
1989 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1990 *data++ = *val++;
1991
1992 spin_unlock_irq(&bp->lock);
1993}
1994
1995static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1996{
1997 struct b44 *bp = netdev_priv(dev);
1998
1999 wol->supported = WAKE_MAGIC;
2000 if (bp->flags & B44_FLAG_WOL_ENABLE)
2001 wol->wolopts = WAKE_MAGIC;
2002 else
2003 wol->wolopts = 0;
2004 memset(&wol->sopass, 0, sizeof(wol->sopass));
2005}
2006
2007static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2008{
2009 struct b44 *bp = netdev_priv(dev);
2010
2011 spin_lock_irq(&bp->lock);
2012 if (wol->wolopts & WAKE_MAGIC)
2013 bp->flags |= B44_FLAG_WOL_ENABLE;
2014 else
2015 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2016 spin_unlock_irq(&bp->lock);
2017
2018 return 0;
2019}
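/* The WOL flag recorded above is acted on in b44_suspend(): a partial
 * reset keeps the core powered and b44_setup_wol() arms the
 * magic-packet match before the device sleeps.
 */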
2020
2021static const struct ethtool_ops b44_ethtool_ops = {
2022 .get_drvinfo = b44_get_drvinfo,
2023 .get_settings = b44_get_settings,
2024 .set_settings = b44_set_settings,
2025 .nway_reset = b44_nway_reset,
2026 .get_link = ethtool_op_get_link,
2027 .get_wol = b44_get_wol,
2028 .set_wol = b44_set_wol,
2029 .get_ringparam = b44_get_ringparam,
2030 .set_ringparam = b44_set_ringparam,
2031 .get_pauseparam = b44_get_pauseparam,
2032 .set_pauseparam = b44_set_pauseparam,
2033 .get_msglevel = b44_get_msglevel,
2034 .set_msglevel = b44_set_msglevel,
2035 .get_strings = b44_get_strings,
2036 .get_stats_count = b44_get_stats_count,
2037 .get_ethtool_stats = b44_get_ethtool_stats,
2038 .get_perm_addr = ethtool_op_get_perm_addr,
2039};
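/* Illustrative userspace mapping (hypothetical shell session using the
 * standard ethtool(8) utility):
 *   ethtool -s eth0 speed 100 duplex full  ->  b44_set_settings()
 *   ethtool -G eth0 rx 200                 ->  b44_set_ringparam()
 *   ethtool -A eth0 autoneg on             ->  b44_set_pauseparam()
 */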
2040
2041static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2042{
2043 struct mii_ioctl_data *data = if_mii(ifr);
2044 struct b44 *bp = netdev_priv(dev);
2045 int err = -EINVAL;
2046
2047 if (!netif_running(dev))
2048 goto out;
2049
2050 spin_lock_irq(&bp->lock);
2051 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2052 spin_unlock_irq(&bp->lock);
2053out:
2054 return err;
2055}
2056
2057/* Read 128 bytes of EEPROM. */
2058static int b44_read_eeprom(struct b44 *bp, u8 *data)
2059{
2060 long i;
2061 __le16 *ptr = (__le16 *) data;
2062
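/* readw() returns the word in host order; cpu_to_le16() stores it back
 * little-endian, so data[] mirrors the raw EEPROM layout on both big-
 * and little-endian hosts.
 */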
2063 for (i = 0; i < 128; i += 2)
2064 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2065
2066 return 0;
2067}
2068
2069static int __devinit b44_get_invariants(struct b44 *bp)
2070{
2071 u8 eeprom[128];
2072 int err;
2073
2074 err = b44_read_eeprom(bp, &eeprom[0]);
2075 if (err)
2076 goto out;
2077
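/* The EEPROM stores the MAC address with the two bytes of each 16-bit
 * word swapped, hence the pairwise reversal below.
 */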
2078 bp->dev->dev_addr[0] = eeprom[79];
2079 bp->dev->dev_addr[1] = eeprom[78];
2080 bp->dev->dev_addr[2] = eeprom[81];
2081 bp->dev->dev_addr[3] = eeprom[80];
2082 bp->dev->dev_addr[4] = eeprom[83];
2083 bp->dev->dev_addr[5] = eeprom[82];
2084
2085 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2086 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2087 return -EINVAL;
2088 }
2089
2090 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2091
2092 bp->phy_addr = eeprom[90] & 0x1f;
2093
2094 bp->imask = IMASK_DEF;
2095
2096 bp->core_unit = ssb_core_unit(bp);
2097 bp->dma_offset = SB_PCI_DMA;
2098
2099 /* XXX - really required?
2100 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2101 */
2102
2103 if (ssb_get_core_rev(bp) >= 7)
2104 bp->flags |= B44_FLAG_B0_ANDLATER;
2105
2106out:
2107 return err;
2108}
2109
2110static int __devinit b44_init_one(struct pci_dev *pdev,
2111 const struct pci_device_id *ent)
2112{
2113 static int b44_version_printed = 0;
2114 unsigned long b44reg_base, b44reg_len;
2115 struct net_device *dev;
2116 struct b44 *bp;
2117 int err, i;
2118
2119 if (b44_version_printed++ == 0)
2120 printk(KERN_INFO "%s", version);
2121
2122 err = pci_enable_device(pdev);
2123 if (err) {
2124 dev_err(&pdev->dev, "Cannot enable PCI device, "
2125 "aborting.\n");
2126 return err;
2127 }
2128
2129 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2130 dev_err(&pdev->dev,
2131 "Cannot find proper PCI device "
2132 "base address, aborting.\n");
2133 err = -ENODEV;
2134 goto err_out_disable_pdev;
2135 }
2136
2137 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2138 if (err) {
2139 dev_err(&pdev->dev,
2140 "Cannot obtain PCI resources, aborting.\n");
2141 goto err_out_disable_pdev;
2142 }
2143
2144 pci_set_master(pdev);
2145
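/* The 4400 core can only generate DMA addresses below 1 GB, so both
 * the streaming and coherent masks are capped at 30 bits.
 */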
2146 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2147 if (err) {
2148 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2149 goto err_out_free_res;
2150 }
2151
2152 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2153 if (err) {
2154 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2155 goto err_out_free_res;
2156 }
2157
2158 b44reg_base = pci_resource_start(pdev, 0);
2159 b44reg_len = pci_resource_len(pdev, 0);
2160
2161 dev = alloc_etherdev(sizeof(*bp));
2162 if (!dev) {
2163 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2164 err = -ENOMEM;
2165 goto err_out_free_res;
2166 }
2167
2168 SET_MODULE_OWNER(dev);
2169 SET_NETDEV_DEV(dev, &pdev->dev);
2170
2171 /* No interesting netdevice features in this card... */
2172 dev->features |= 0;
2173
2174 bp = netdev_priv(dev);
2175 bp->pdev = pdev;
2176 bp->dev = dev;
2177
2178 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2179
2180 spin_lock_init(&bp->lock);
2181
2182 bp->regs = ioremap(b44reg_base, b44reg_len);
2183 if (!bp->regs) {
2184 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2185 err = -ENOMEM;
2186 goto err_out_free_dev;
2187 }
2188
2189 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2190 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2191
2192 dev->open = b44_open;
2193 dev->stop = b44_close;
2194 dev->hard_start_xmit = b44_start_xmit;
2195 dev->get_stats = b44_get_stats;
2196 dev->set_multicast_list = b44_set_rx_mode;
2197 dev->set_mac_address = b44_set_mac_addr;
2198 dev->do_ioctl = b44_ioctl;
2199 dev->tx_timeout = b44_tx_timeout;
2200 dev->poll = b44_poll;
2201 dev->weight = 64;
2202 dev->watchdog_timeo = B44_TX_TIMEOUT;
2203#ifdef CONFIG_NET_POLL_CONTROLLER
2204 dev->poll_controller = b44_poll_controller;
2205#endif
2206 dev->change_mtu = b44_change_mtu;
2207 dev->irq = pdev->irq;
2208 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
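/* Old-style setup: handlers are assigned directly to struct net_device
 * fields (this driver predates the consolidated net_device_ops table).
 */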
2209
2210 netif_carrier_off(dev);
2211
2212 err = b44_get_invariants(bp);
2213 if (err) {
2214 dev_err(&pdev->dev,
2215 "Problem fetching invariants of chip, aborting.\n");
2216 goto err_out_iounmap;
2217 }
2218
2219 bp->mii_if.dev = dev;
2220 bp->mii_if.mdio_read = b44_mii_read;
2221 bp->mii_if.mdio_write = b44_mii_write;
2222 bp->mii_if.phy_id = bp->phy_addr;
2223 bp->mii_if.phy_id_mask = 0x1f;
2224 bp->mii_if.reg_num_mask = 0x1f;
2225
2226 /* By default, advertise all speed/duplex settings. */
2227 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2228 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2229
2230 /* By default, auto-negotiate PAUSE. */
2231 bp->flags |= B44_FLAG_PAUSE_AUTO;
2232
2233 err = register_netdev(dev);
2234 if (err) {
2235 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2236 goto err_out_iounmap;
2237 }
2238
2239 pci_set_drvdata(pdev, dev);
2240
2241 pci_save_state(bp->pdev);
2242
2243 /* Chip reset provides power to the b44 MAC & PCI cores, which
2244 * is necessary for MAC register access.
2245 */
2246 b44_chip_reset(bp);
2247
2248 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2249 for (i = 0; i < 6; i++)
2250 printk("%2.2x%c", dev->dev_addr[i],
2251 i == 5 ? '\n' : ':');
2252
2253 return 0;
2254
2255err_out_iounmap:
2256 iounmap(bp->regs);
2257
2258err_out_free_dev:
2259 free_netdev(dev);
2260
2261err_out_free_res:
2262 pci_release_regions(pdev);
2263
2264err_out_disable_pdev:
2265 pci_disable_device(pdev);
2266 pci_set_drvdata(pdev, NULL);
2267 return err;
2268}
2269
2270static void __devexit b44_remove_one(struct pci_dev *pdev)
2271{
2272 struct net_device *dev = pci_get_drvdata(pdev);
2273 struct b44 *bp = netdev_priv(dev);
2274
2275 unregister_netdev(dev);
2276 iounmap(bp->regs);
2277 free_netdev(dev);
2278 pci_release_regions(pdev);
2279 pci_disable_device(pdev);
2280 pci_set_drvdata(pdev, NULL);
2281}
2282
2283static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2284{
2285 struct net_device *dev = pci_get_drvdata(pdev);
2286 struct b44 *bp = netdev_priv(dev);
2287
2288 if (!netif_running(dev))
2289 return 0;
2290
2291 del_timer_sync(&bp->timer);
2292
2293 spin_lock_irq(&bp->lock);
2294
2295 b44_halt(bp);
2296 netif_carrier_off(bp->dev);
2297 netif_device_detach(bp->dev);
2298 b44_free_rings(bp);
2299
2300 spin_unlock_irq(&bp->lock);
2301
2302 free_irq(dev->irq, dev);
2303 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2304 b44_init_hw(bp, B44_PARTIAL_RESET);
2305 b44_setup_wol(bp);
2306 }
2307 pci_disable_device(pdev);
2308 return 0;
2309}
2310
2311static int b44_resume(struct pci_dev *pdev)
2312{
2313 struct net_device *dev = pci_get_drvdata(pdev);
2314 struct b44 *bp = netdev_priv(dev);
2315 int rc = 0;
2316
2317 pci_restore_state(pdev);
2318 rc = pci_enable_device(pdev);
2319 if (rc) {
2320 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2321 dev->name);
2322 return rc;
2323 }
2324
2325 pci_set_master(pdev);
2326
2327 if (!netif_running(dev))
2328 return 0;
2329
2330 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2331 if (rc) {
2332 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2333 pci_disable_device(pdev);
2334 return rc;
2335 }
2336
2337 spin_lock_irq(&bp->lock);
2338
2339 b44_init_rings(bp);
2340 b44_init_hw(bp, B44_FULL_RESET);
2341 netif_device_attach(bp->dev);
2342 spin_unlock_irq(&bp->lock);
2343
2344 b44_enable_ints(bp);
2345 netif_wake_queue(dev);
2346
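/* Kick the PHY poll timer immediately so link state is re-evaluated
 * right after resume.
 */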
2347 mod_timer(&bp->timer, jiffies + 1);
2348
2349 return 0;
2350}
2351
2352static struct pci_driver b44_driver = {
2353 .name = DRV_MODULE_NAME,
2354 .id_table = b44_pci_tbl,
2355 .probe = b44_init_one,
2356 .remove = __devexit_p(b44_remove_one),
2357 .suspend = b44_suspend,
2358 .resume = b44_resume,
2359};
2360
2361static int __init b44_init(void)
2362{
2363 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2364
2365 /* Set up parameters for syncing RX/TX DMA descriptors */
2366 dma_desc_align_mask = ~(dma_desc_align_size - 1);
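/* Worked example: a 32-byte cache line gives dma_desc_align_size = 32
 * and dma_desc_align_mask = ~31 = 0xffffffe0 (as a 32-bit mask).
 */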
2367 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2368
2369 return pci_register_driver(&b44_driver);
2370}
2371
2372static void __exit b44_cleanup(void)
2373{
2374 pci_unregister_driver(&b44_driver);
2375}
2376
2377module_init(b44_init);
2378module_exit(b44_cleanup);
2379