/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_MODULE_VERSION	"2.0"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

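/* TX ring accounting: tx_prod is where the driver queues the next
 * packet, tx_cons is the last descriptor the chip has completed, and
 * at most tx_pending of the B44_TX_RING_SIZE entries may be in flight
 * at once (TX_RING_GAP is the reserved remainder).
 */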
#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

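/* The chip DMAs a struct rx_header in front of each received frame;
 * RX_PKT_OFFSET reserves room for it, and the extra 2 bytes appear to
 * be the usual alignment pad so the IP header lands word-aligned.
 */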
#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

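/* Expand B44_STAT_REG_DECLARE (from b44.h) into an array of statistic
 * names: each _B44(x) entry becomes the string "x" via the preprocessor
 * stringify operator.
 */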
static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

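/* When a ring lives in streaming-mapped kernel memory (the RING_HACK
 * case below), only the region holding the touched descriptor is
 * synced.  dma_desc_align_mask and dma_desc_sync_size are presumably
 * set at module init from the platform's DMA alignment; that code is
 * outside this excerpt.
 */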
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_device(sdev, dma_base,
					     offset & dma_desc_align_mask,
					     dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
					  offset & dma_desc_align_mask,
					  dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

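/* Poll @reg until @bit is set (or cleared, when @clear is nonzero).
 * Each iteration waits 10 usec, so @timeout is in ~10 usec units.
 */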
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

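/* CAM entry layout: B44_CAM_DATA_HI holds the two most significant
 * MAC address bytes plus the valid bit, B44_CAM_DATA_LO the remaining
 * four bytes.
 */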
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
				(index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
				(index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
				 (phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
				 (phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
				 (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

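/* Reclaim descriptors the chip has finished transmitting (everything
 * up to the current DMATX status pointer), free their skbs, and wake
 * the queue once enough descriptors are free again.
 */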
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		ssb_dma_unmap_single(bp->sdev,
				     rp->mapping,
				     skb->len,
				     DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = ssb_dma_map_single(bp->sdev, skb->data,
				     RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = ssb_dma_map_single(bp->sdev, skb->data,
					     RX_PKT_BUF_SZ,
					     DMA_FROM_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
				       RX_PKT_BUF_SZ,
				       DMA_FROM_DEVICE);
}

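/* Receive up to @budget frames.  Large packets are passed up in place
 * and their ring slot refilled with a fresh skb; small ones (or all of
 * them, once force_copybreak is set) are copied into a new skb and the
 * original buffer recycled back onto the ring.
 */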
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		ssb_dma_sync_single_for_cpu(bp->sdev, map,
					    RX_PKT_BUF_SZ,
					    DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			ssb_dma_unmap_single(bp->sdev, map,
					     skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

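/* NAPI poll handler: reap completed TX work, receive up to @budget
 * packets, recover from error interrupts with a ring/hw reinit, and
 * re-enable interrupts once less than the full budget was used.
 */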
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
					     len, DMA_TO_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
				     DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->rx_ring, bp->rx_ring_dma,
						GFP_KERNEL);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->tx_ring, bp->tx_ring_dma,
						GFP_KERNEL);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
						 DMA_TABLE_BYTES,
						 DMA_BIDIRECTIONAL);

		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
						 DMA_TABLE_BYTES,
						 DMA_TO_DEVICE);

		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it.  This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

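/* Copy a wakeup pattern (or pattern mask) into the chip's filter RAM,
 * one 32-bit word at a time, via the FILT_ADDR/FILT_DATA register pair.
 */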
static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

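/* Build a Wake-on-LAN magic packet match at @offset into @ppattern:
 * six 0xff sync bytes followed by repetitions of the MAC address, with
 * the matching bits set in @pmask.  Returns the pattern length minus
 * one, the encoding the WKUP_LEN register expects.
 */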
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				 (j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these patterns' lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes = hwstat->rx_octets;
	nstat->tx_bytes = hwstat->tx_octets;
	nstat->tx_errors = (hwstat->tx_jabber_pkts +
			    hwstat->tx_oversize_pkts +
			    hwstat->tx_underruns +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_late_cols);
	nstat->multicast = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors = hwstat->rx_align_errs;
	nstat->rx_crc_errors = hwstat->rx_crc_errs;
	nstat->rx_errors = (hwstat->rx_jabber_pkts +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_missed_pkts +
			    hwstat->rx_crc_align_errs +
			    hwstat->rx_undersize +
			    hwstat->rx_crc_errs +
			    hwstat->rx_align_errs +
			    hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

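/* Only the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are
 * meaningful here; everything is delegated to generic_mii_ioctl()
 * under bp->lock, and -EINVAL is returned while the interface is down.
 */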
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

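/* Pull the chip "invariants" out of the SSB bus SPROM: the MAC address
 * and PHY address come from the et1* fields for the second and later
 * core instances and from the et0* fields otherwise, and the DMA
 * translation offset is cached for descriptor address fixups.
 */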
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

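/* Probe path.  The ordering below matters: the netdev is allocated and
 * wired up first, the SSB bus is powered and constrained to the 30-bit
 * DMA mask this core can address, the SPROM invariants are read, and
 * only after register_netdev() succeeds is the chip fully reset so
 * that the MAC registers become accessible for the PHY probe.
 */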
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	if (b44_version_printed++ == 0)
		pr_info("%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}
	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}
	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
		    dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

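/* Suspend: stop the periodic timer, halt the MAC and tear down the
 * rings under bp->lock, then release the IRQ.  If Wake-on-LAN is
 * enabled, the chip is re-initialized just enough (a partial reset)
 * to arm the magic-packet logic before the device enters D3hot.
 */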
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

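/* Resume: power the bus back up first; if the interface was not
 * running there is nothing more to do.  Otherwise re-acquire the IRQ
 * and rebuild the rings and hardware with a full reset before the
 * queue and the periodic timer are restarted.
 */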
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

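/* The device always attaches through the SSB bus; when CONFIG_B44_PCI
 * is set, a PCI host wrapper is registered as well so that PCI cards
 * get wrapped in an SSB bus and handled by the same driver.  With
 * CONFIG_B44_PCI unset these helpers compile down to no-ops.
 */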
static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

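/* DMA descriptors are synced to the device in cacheline-sized,
 * cacheline-aligned chunks on non-coherent platforms.  For example,
 * with a 32-byte cache line, dma_desc_align_mask becomes ~31
 * (0xffffffe0), rounding a descriptor address down to its cacheline,
 * and dma_desc_sync_size becomes max(32, sizeof(struct dma_desc)) =
 * 32, so every sync covers at least one whole line even though a
 * single two-word descriptor is smaller.
 */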
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Set up parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);