[BNX2]: Use msleep().
drivers/net/bnx2.c
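
This patch converts the driver's remaining open-coded sleeps to msleep(). A minimal sketch of the pattern follows; the surrounding context is illustrative, not the literal diff hunks:

	/* Before: open-coded sleep expressed in jiffies */
	current->state = TASK_UNINTERRUPTIBLE;
	schedule_timeout(HZ / 50);

	/* After: the same wait expressed directly in milliseconds */
	msleep(20);

msleep() may not be called in atomic context, which is why the driver drops phy_lock around waits such as the msleep(20) in bnx2_setup_serdes_phy() and the msleep(50) in bnx2_setup_copper_phy() below.
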
/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.4"
#define DRV_MODULE_RELDATE	"August 3, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

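/* Returns the number of free TX descriptors.  The memory barrier keeps the
 * tx_prod/tx_cons snapshot consistent with updates made by the xmit and
 * completion paths on other CPUs. */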
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

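/* MDIO access helpers.  When hardware auto-polling owns the MDIO bus, it is
 * temporarily disabled around each access.  A command is written to
 * BNX2_EMAC_MDIO_COMM and completion is detected by polling the START_BUSY
 * bit; -EBUSY is returned if it never clears. */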
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

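/* Resolve TX/RX flow control after autonegotiation using the priority
 * resolution in Table 28B-3 of IEEE 802.3ab-1999.  1000Base-X pause bits
 * are first remapped onto the copper ADVERTISE_PAUSE_* layout so a single
 * resolution path serves both SerDes and copper. */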
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

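/* Program the EMAC to match the resolved link: port mode (MII, GMII or
 * 2.5G), half/full duplex, and the TX/RX pause enables, then acknowledge
 * the link-change attention. */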
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

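/* Poll the PHY for link state and update the driver and MAC to match.
 * BMSR is read twice because the link-status bit is latched-low; remote
 * PHY configurations are handled through firmware events instead. */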
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int);

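/* In remote-PHY configurations the firmware owns the PHY; encode the
 * requested speed/duplex/pause settings into a BNX2_NETLINK_SET_LINK_*
 * word and pass it down through the driver mailbox. */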
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

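/* Configure the SerDes PHY: either force 1G/2.5G with autoneg off, or
 * restart autonegotiation with the new 1000Base-X advertisement, bouncing
 * the link first so the partner notices the change. */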
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}

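/* Configure the copper PHY: with autoneg, rewrite the 10/100 and 1000
 * advertisement registers and restart negotiation only if something
 * changed; otherwise force speed/duplex through BMCR, forcing the link
 * down first when the partner needs to see the transition. */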
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}


static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

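/* Driver/firmware handshake: post a sequence-numbered message in the
 * shared-memory mailbox, then poll (sleeping 10 ms per iteration with
 * msleep) until the firmware echoes the sequence number back as an ACK. */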
2007 static int
2008 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2009 {
2010 int i;
2011 u32 val;
2012
2013 bp->fw_wr_seq++;
2014 msg_data |= bp->fw_wr_seq;
2015
2016 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2017
2018 /* Wait for an acknowledgement. */
2019 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2020 msleep(10);
2021
2022 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2023
2024 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2025 break;
2026 }
2027 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2028 return 0;
2029
2030 /* If we timed out, inform the firmware that this is the case. */
2031 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2032 if (!silent)
2033 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2034 "%x\n", msg_data);
2035
2036 msg_data &= ~BNX2_DRV_MSG_CODE;
2037 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2038
2039 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2040
2041 return -EBUSY;
2042 }
2043
2044 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2045 return -EIO;
2046
2047 return 0;
2048 }
2049
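/* The 5709 keeps its connection context in host memory. Each context page
 * is handed to the chip by writing its DMA address into the host page
 * table registers and latching the entry with a WRITE_REQ bit that the
 * hardware clears once the entry has been accepted.
 */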
2050 static int
2051 bnx2_init_5709_context(struct bnx2 *bp)
2052 {
2053 int i, ret = 0;
2054 u32 val;
2055
2056 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2057 val |= (BCM_PAGE_BITS - 8) << 16;
2058 REG_WR(bp, BNX2_CTX_COMMAND, val);
2059 for (i = 0; i < 10; i++) {
2060 val = REG_RD(bp, BNX2_CTX_COMMAND);
2061 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2062 break;
2063 udelay(2);
2064 }
2065 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2066 return -EBUSY;
2067
2068 for (i = 0; i < bp->ctx_pages; i++) {
2069 int j;
2070
2071 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2072 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2073 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2074 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2075 (u64) bp->ctx_blk_mapping[i] >> 32);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2077 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2078 for (j = 0; j < 10; j++) {
2079
2080 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2081 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2082 break;
2083 udelay(5);
2084 }
2085 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2086 ret = -EBUSY;
2087 break;
2088 }
2089 }
2090 return ret;
2091 }
2092
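/* Older chips (5706/5708) keep context memory on-chip: map each of the 96
 * quick contexts through the paging window and zero it out. On 5706 A0
 * some virtual CIDs map to different physical CIDs, hence the remapping
 * in the A0-only branch below.
 */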
2093 static void
2094 bnx2_init_context(struct bnx2 *bp)
2095 {
2096 u32 vcid;
2097
2098 vcid = 96;
2099 while (vcid) {
2100 u32 vcid_addr, pcid_addr, offset;
2101 int i;
2102
2103 vcid--;
2104
2105 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2106 u32 new_vcid;
2107
2108 vcid_addr = GET_PCID_ADDR(vcid);
2109 if (vcid & 0x8) {
2110 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2111 }
2112 else {
2113 new_vcid = vcid;
2114 }
2115 pcid_addr = GET_PCID_ADDR(new_vcid);
2116 }
2117 else {
2118 vcid_addr = GET_CID_ADDR(vcid);
2119 pcid_addr = vcid_addr;
2120 }
2121
2122 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2123 vcid_addr += (i << PHY_CTX_SHIFT);
2124 pcid_addr += (i << PHY_CTX_SHIFT);
2125
2126 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2127 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2128
2129 /* Zero out the context. */
2130 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2131 CTX_WR(bp, 0x00, offset, 0);
2132
2133 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2134 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2135 }
2136 }
2137 }
2138
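/* 5706 A0 workaround: drain the entire rx mbuf pool, remember the clusters
 * whose allocation reply came back without the bad-block flag (bit 9), and
 * free only those back. Clusters with bit 9 set stay allocated forever and
 * are thereby removed from the free pool. The free command re-encodes the
 * cluster index as ((val << 9) | val | 1), which appears to mirror the
 * allocation reply format.
 */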
2139 static int
2140 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2141 {
2142 u16 *good_mbuf;
2143 u32 good_mbuf_cnt;
2144 u32 val;
2145
2146 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2147 if (good_mbuf == NULL) {
2148 printk(KERN_ERR PFX "Failed to allocate memory in "
2149 "bnx2_alloc_bad_rbuf\n");
2150 return -ENOMEM;
2151 }
2152
2153 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2154 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2155
2156 good_mbuf_cnt = 0;
2157
2158 /* Allocate a bunch of mbufs and save the good ones in an array. */
2159 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2160 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2161 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2162
2163 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2164
2165 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2166
2167 /* Addresses with bit 9 set are bad memory blocks. */
2168 if (!(val & (1 << 9))) {
2169 good_mbuf[good_mbuf_cnt] = (u16) val;
2170 good_mbuf_cnt++;
2171 }
2172
2173 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2174 }
2175
2176 /* Free the good ones back to the mbuf pool, thus discarding
2177 * all the bad ones. */
2178 while (good_mbuf_cnt) {
2179 good_mbuf_cnt--;
2180
2181 val = good_mbuf[good_mbuf_cnt];
2182 val = (val << 9) | val | 1;
2183
2184 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2185 }
2186 kfree(good_mbuf);
2187 return 0;
2188 }
2189
2190 static void
2191 bnx2_set_mac_addr(struct bnx2 *bp)
2192 {
2193 u32 val;
2194 u8 *mac_addr = bp->dev->dev_addr;
2195
2196 val = (mac_addr[0] << 8) | mac_addr[1];
2197
2198 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2199
2200 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2201 (mac_addr[4] << 8) | mac_addr[5];
2202
2203 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2204 }
2205
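/* Post one receive buffer: allocate an skb, align skb->data to
 * BNX2_RX_ALIGN, map it for DMA, and publish the 64-bit bus address in
 * the rx BD as hi/lo halves. rx_prod_bseq is a running byte-sequence
 * count that is later posted to the chip's HOST_BSEQ mailbox.
 */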
2206 static inline int
2207 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2208 {
2209 struct sk_buff *skb;
2210 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2211 dma_addr_t mapping;
2212 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2213 unsigned long align;
2214
2215 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2216 if (skb == NULL) {
2217 return -ENOMEM;
2218 }
2219
2220 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2221 skb_reserve(skb, BNX2_RX_ALIGN - align);
2222
2223 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2224 PCI_DMA_FROMDEVICE);
2225
2226 rx_buf->skb = skb;
2227 pci_unmap_addr_set(rx_buf, mapping, mapping);
2228
2229 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2230 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2231
2232 bp->rx_prod_bseq += bp->rx_buf_use_size;
2233
2234 return 0;
2235 }
2236
2237 static int
2238 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2239 {
2240 struct status_block *sblk = bp->status_blk;
2241 u32 new_link_state, old_link_state;
2242 int is_set = 1;
2243
2244 new_link_state = sblk->status_attn_bits & event;
2245 old_link_state = sblk->status_attn_bits_ack & event;
2246 if (new_link_state != old_link_state) {
2247 if (new_link_state)
2248 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2249 else
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2251 } else
2252 is_set = 0;
2253
2254 return is_set;
2255 }
2256
2257 static void
2258 bnx2_phy_int(struct bnx2 *bp)
2259 {
2260 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2261 spin_lock(&bp->phy_lock);
2262 bnx2_set_link(bp);
2263 spin_unlock(&bp->phy_lock);
2264 }
2265 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2266 bnx2_set_remote_link(bp);
2267
2268 }
2269
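/* Reclaim completed tx buffers. The hardware consumer index never stops
 * on the last BD of a ring page (that BD chains to the next page), so an
 * index that lands on MAX_TX_DESC_CNT is advanced past it. With TSO the
 * chip can report partial completions, so a packet is only reclaimed once
 * the consumer has moved past its final BD.
 */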
2270 static void
2271 bnx2_tx_int(struct bnx2 *bp)
2272 {
2273 struct status_block *sblk = bp->status_blk;
2274 u16 hw_cons, sw_cons, sw_ring_cons;
2275 int tx_free_bd = 0;
2276
2277 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2278 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2279 hw_cons++;
2280 }
2281 sw_cons = bp->tx_cons;
2282
2283 while (sw_cons != hw_cons) {
2284 struct sw_bd *tx_buf;
2285 struct sk_buff *skb;
2286 int i, last;
2287
2288 sw_ring_cons = TX_RING_IDX(sw_cons);
2289
2290 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2291 skb = tx_buf->skb;
2292
2293 /* partial BD completions possible with TSO packets */
2294 if (skb_is_gso(skb)) {
2295 u16 last_idx, last_ring_idx;
2296
2297 last_idx = sw_cons +
2298 skb_shinfo(skb)->nr_frags + 1;
2299 last_ring_idx = sw_ring_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2302 last_idx++;
2303 }
2304 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2305 break;
2306 }
2307 }
2308
2309 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2310 skb_headlen(skb), PCI_DMA_TODEVICE);
2311
2312 tx_buf->skb = NULL;
2313 last = skb_shinfo(skb)->nr_frags;
2314
2315 for (i = 0; i < last; i++) {
2316 sw_cons = NEXT_TX_BD(sw_cons);
2317
2318 pci_unmap_page(bp->pdev,
2319 pci_unmap_addr(
2320 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2321 mapping),
2322 skb_shinfo(skb)->frags[i].size,
2323 PCI_DMA_TODEVICE);
2324 }
2325
2326 sw_cons = NEXT_TX_BD(sw_cons);
2327
2328 tx_free_bd += last + 1;
2329
2330 dev_kfree_skb(skb);
2331
2332 hw_cons = bp->hw_tx_cons =
2333 sblk->status_tx_quick_consumer_index0;
2334
2335 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2336 hw_cons++;
2337 }
2338 }
2339
2340 bp->tx_cons = sw_cons;
2341 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2342 * before checking for netif_queue_stopped(). Without the
2343 * memory barrier, there is a small possibility that bnx2_start_xmit()
2344 * will miss it and cause the queue to be stopped forever.
2345 */
2346 smp_mb();
2347
2348 if (unlikely(netif_queue_stopped(bp->dev)) &&
2349 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2350 netif_tx_lock(bp->dev);
2351 if ((netif_queue_stopped(bp->dev)) &&
2352 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2353 netif_wake_queue(bp->dev);
2354 netif_tx_unlock(bp->dev);
2355 }
2356 }
2357
2358 static inline void
2359 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2360 u16 cons, u16 prod)
2361 {
2362 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2363 struct rx_bd *cons_bd, *prod_bd;
2364
2365 cons_rx_buf = &bp->rx_buf_ring[cons];
2366 prod_rx_buf = &bp->rx_buf_ring[prod];
2367
2368 pci_dma_sync_single_for_device(bp->pdev,
2369 pci_unmap_addr(cons_rx_buf, mapping),
2370 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2371
2372 bp->rx_prod_bseq += bp->rx_buf_use_size;
2373
2374 prod_rx_buf->skb = skb;
2375
2376 if (cons == prod)
2377 return;
2378
2379 pci_unmap_addr_set(prod_rx_buf, mapping,
2380 pci_unmap_addr(cons_rx_buf, mapping));
2381
2382 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2383 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2384 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2385 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2386 }
2387
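/* Receive path: the chip DMAs an l2_fhdr ahead of each frame (hence
 * rx_offset). Frames with errors recycle their buffer in place via
 * reuse_rx; small frames on a jumbo MTU are copied into a fresh skb so
 * the large buffer can be reposted; otherwise the buffer is unmapped and
 * passed up, with a replacement allocated in its slot. If the replacement
 * allocation fails, the frame is dropped and the buffer reused.
 */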
2388 static int
2389 bnx2_rx_int(struct bnx2 *bp, int budget)
2390 {
2391 struct status_block *sblk = bp->status_blk;
2392 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2393 struct l2_fhdr *rx_hdr;
2394 int rx_pkt = 0;
2395
2396 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2397 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2398 hw_cons++;
2399 }
2400 sw_cons = bp->rx_cons;
2401 sw_prod = bp->rx_prod;
2402
2403 /* Memory barrier necessary as speculative reads of the rx
2404 * buffer can be ahead of the index in the status block
2405 */
2406 rmb();
2407 while (sw_cons != hw_cons) {
2408 unsigned int len;
2409 u32 status;
2410 struct sw_bd *rx_buf;
2411 struct sk_buff *skb;
2412 dma_addr_t dma_addr;
2413
2414 sw_ring_cons = RX_RING_IDX(sw_cons);
2415 sw_ring_prod = RX_RING_IDX(sw_prod);
2416
2417 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2418 skb = rx_buf->skb;
2419
2420 rx_buf->skb = NULL;
2421
2422 dma_addr = pci_unmap_addr(rx_buf, mapping);
2423
2424 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2425 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2426
2427 rx_hdr = (struct l2_fhdr *) skb->data;
2428 len = rx_hdr->l2_fhdr_pkt_len - 4;
2429
2430 if ((status = rx_hdr->l2_fhdr_status) &
2431 (L2_FHDR_ERRORS_BAD_CRC |
2432 L2_FHDR_ERRORS_PHY_DECODE |
2433 L2_FHDR_ERRORS_ALIGNMENT |
2434 L2_FHDR_ERRORS_TOO_SHORT |
2435 L2_FHDR_ERRORS_GIANT_FRAME)) {
2436
2437 goto reuse_rx;
2438 }
2439
2440 /* Since we don't have a jumbo ring, copy small packets
2441 * when the MTU is larger than 1500.
2442 */
2443 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2444 struct sk_buff *new_skb;
2445
2446 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2447 if (new_skb == NULL)
2448 goto reuse_rx;
2449
2450 /* aligned copy */
2451 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2452 new_skb->data, len + 2);
2453 skb_reserve(new_skb, 2);
2454 skb_put(new_skb, len);
2455
2456 bnx2_reuse_rx_skb(bp, skb,
2457 sw_ring_cons, sw_ring_prod);
2458
2459 skb = new_skb;
2460 }
2461 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2462 pci_unmap_single(bp->pdev, dma_addr,
2463 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2464
2465 skb_reserve(skb, bp->rx_offset);
2466 skb_put(skb, len);
2467 }
2468 else {
2469 reuse_rx:
2470 bnx2_reuse_rx_skb(bp, skb,
2471 sw_ring_cons, sw_ring_prod);
2472 goto next_rx;
2473 }
2474
2475 skb->protocol = eth_type_trans(skb, bp->dev);
2476
2477 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2478 (ntohs(skb->protocol) != 0x8100)) {
2479
2480 dev_kfree_skb(skb);
2481 goto next_rx;
2482
2483 }
2484
2485 skb->ip_summed = CHECKSUM_NONE;
2486 if (bp->rx_csum &&
2487 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2488 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2489
2490 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2491 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2492 skb->ip_summed = CHECKSUM_UNNECESSARY;
2493 }
2494
2495 #ifdef BCM_VLAN
2496 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2497 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2498 rx_hdr->l2_fhdr_vlan_tag);
2499 }
2500 else
2501 #endif
2502 netif_receive_skb(skb);
2503
2504 bp->dev->last_rx = jiffies;
2505 rx_pkt++;
2506
2507 next_rx:
2508 sw_cons = NEXT_RX_BD(sw_cons);
2509 sw_prod = NEXT_RX_BD(sw_prod);
2510
2511 if (rx_pkt == budget)
2512 break;
2513
2514 /* Refresh hw_cons to see if there is new work */
2515 if (sw_cons == hw_cons) {
2516 hw_cons = bp->hw_rx_cons =
2517 sblk->status_rx_quick_consumer_index0;
2518 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2519 hw_cons++;
2520 rmb();
2521 }
2522 }
2523 bp->rx_cons = sw_cons;
2524 bp->rx_prod = sw_prod;
2525
2526 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2527
2528 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2529
2530 mmiowb();
2531
2532 return rx_pkt;
2533
2534 }
2535
2536 /* MSI ISR - The only difference between this and the INTx ISR
2537 * is that the MSI interrupt is always serviced.
2538 */
2539 static irqreturn_t
2540 bnx2_msi(int irq, void *dev_instance)
2541 {
2542 struct net_device *dev = dev_instance;
2543 struct bnx2 *bp = netdev_priv(dev);
2544
2545 prefetch(bp->status_blk);
2546 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2547 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2548 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2549
2550 /* Return here if interrupt is disabled. */
2551 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2552 return IRQ_HANDLED;
2553
2554 netif_rx_schedule(dev);
2555
2556 return IRQ_HANDLED;
2557 }
2558
2559 static irqreturn_t
2560 bnx2_msi_1shot(int irq, void *dev_instance)
2561 {
2562 struct net_device *dev = dev_instance;
2563 struct bnx2 *bp = netdev_priv(dev);
2564
2565 prefetch(bp->status_blk);
2566
2567 /* Return here if interrupt is disabled. */
2568 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2569 return IRQ_HANDLED;
2570
2571 netif_rx_schedule(dev);
2572
2573 return IRQ_HANDLED;
2574 }
2575
2576 static irqreturn_t
2577 bnx2_interrupt(int irq, void *dev_instance)
2578 {
2579 struct net_device *dev = dev_instance;
2580 struct bnx2 *bp = netdev_priv(dev);
2581 struct status_block *sblk = bp->status_blk;
2582
2583 /* When using INTx, it is possible for the interrupt to arrive
2584 * at the CPU before the status block posted prior to the
2585 * interrupt. Reading a register will flush the status block.
2586 * When using MSI, the MSI message will always complete after
2587 * the status block write.
2588 */
2589 if ((sblk->status_idx == bp->last_status_idx) &&
2590 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2591 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2592 return IRQ_NONE;
2593
2594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2595 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2596 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2597
2598 /* Read back to deassert IRQ immediately to avoid too many
2599 * spurious interrupts.
2600 */
2601 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2602
2603 /* Return here if interrupt is shared and is disabled. */
2604 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2605 return IRQ_HANDLED;
2606
2607 if (netif_rx_schedule_prep(dev)) {
2608 bp->last_status_idx = sblk->status_idx;
2609 __netif_rx_schedule(dev);
2610 }
2611
2612 return IRQ_HANDLED;
2613 }
2614
2615 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2616 STATUS_ATTN_BITS_TIMER_ABORT)
2617
2618 static inline int
2619 bnx2_has_work(struct bnx2 *bp)
2620 {
2621 struct status_block *sblk = bp->status_blk;
2622
2623 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2624 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2625 return 1;
2626
2627 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2628 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2629 return 1;
2630
2631 return 0;
2632 }
2633
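/* NAPI poll using the old two-argument interface: rx work done is
 * subtracted from both *budget and dev->quota. Returning 1 asks to be
 * polled again; returning 0 means all work is done and interrupts were
 * re-enabled through the INT_ACK_CMD writes at the end of the function.
 */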
2634 static int
2635 bnx2_poll(struct net_device *dev, int *budget)
2636 {
2637 struct bnx2 *bp = netdev_priv(dev);
2638 struct status_block *sblk = bp->status_blk;
2639 u32 status_attn_bits = sblk->status_attn_bits;
2640 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2641
2642 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2643 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2644
2645 bnx2_phy_int(bp);
2646
2647 /* This is needed to take care of transient status
2648 * during link changes.
2649 */
2650 REG_WR(bp, BNX2_HC_COMMAND,
2651 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2652 REG_RD(bp, BNX2_HC_COMMAND);
2653 }
2654
2655 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2656 bnx2_tx_int(bp);
2657
2658 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2659 int orig_budget = *budget;
2660 int work_done;
2661
2662 if (orig_budget > dev->quota)
2663 orig_budget = dev->quota;
2664
2665 work_done = bnx2_rx_int(bp, orig_budget);
2666 *budget -= work_done;
2667 dev->quota -= work_done;
2668 }
2669
2670 bp->last_status_idx = bp->status_blk->status_idx;
2671 rmb();
2672
2673 if (!bnx2_has_work(bp)) {
2674 netif_rx_complete(dev);
2675 if (likely(bp->flags & USING_MSI_FLAG)) {
2676 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2677 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2678 bp->last_status_idx);
2679 return 0;
2680 }
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2684 bp->last_status_idx);
2685
2686 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2687 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2688 bp->last_status_idx);
2689 return 0;
2690 }
2691
2692 return 1;
2693 }
2694
2695 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2696 * from set_multicast.
2697 */
2698 static void
2699 bnx2_set_rx_mode(struct net_device *dev)
2700 {
2701 struct bnx2 *bp = netdev_priv(dev);
2702 u32 rx_mode, sort_mode;
2703 int i;
2704
2705 spin_lock_bh(&bp->phy_lock);
2706
2707 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2708 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2709 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2710 #ifdef BCM_VLAN
2711 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2712 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2713 #else
2714 if (!(bp->flags & ASF_ENABLE_FLAG))
2715 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2716 #endif
2717 if (dev->flags & IFF_PROMISC) {
2718 /* Promiscuous mode. */
2719 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2720 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2721 BNX2_RPM_SORT_USER0_PROM_VLAN;
2722 }
2723 else if (dev->flags & IFF_ALLMULTI) {
2724 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2725 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2726 0xffffffff);
2727 }
2728 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2729 }
2730 else {
2731 /* Accept one or more multicast(s). */
2732 struct dev_mc_list *mclist;
2733 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2734 u32 regidx;
2735 u32 bit;
2736 u32 crc;
2737
2738 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2739
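/* Hash on the low byte of the little-endian CRC of the address:
 * bits 7:5 select one of the 8 hash registers, bits 4:0 the bit
 * within it. For example, crc & 0xff == 0xe3 selects register 7,
 * bit 3.
 */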
2740 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2741 i++, mclist = mclist->next) {
2742
2743 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2744 bit = crc & 0xff;
2745 regidx = (bit & 0xe0) >> 5;
2746 bit &= 0x1f;
2747 mc_filter[regidx] |= (1 << bit);
2748 }
2749
2750 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2751 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2752 mc_filter[i]);
2753 }
2754
2755 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2756 }
2757
2758 if (rx_mode != bp->rx_mode) {
2759 bp->rx_mode = rx_mode;
2760 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2761 }
2762
2763 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2764 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2765 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2766
2767 spin_unlock_bh(&bp->phy_lock);
2768 }
2769
2770 #define FW_BUF_SIZE 0x8000
2771
2772 static int
2773 bnx2_gunzip_init(struct bnx2 *bp)
2774 {
2775 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2776 goto gunzip_nomem1;
2777
2778 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2779 goto gunzip_nomem2;
2780
2781 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2782 if (bp->strm->workspace == NULL)
2783 goto gunzip_nomem3;
2784
2785 return 0;
2786
2787 gunzip_nomem3:
2788 kfree(bp->strm);
2789 bp->strm = NULL;
2790
2791 gunzip_nomem2:
2792 vfree(bp->gunzip_buf);
2793 bp->gunzip_buf = NULL;
2794
2795 gunzip_nomem1:
2796 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2797 "decompression.\n", bp->dev->name);
2798 return -ENOMEM;
2799 }
2800
2801 static void
2802 bnx2_gunzip_end(struct bnx2 *bp)
2803 {
2804 kfree(bp->strm->workspace);
2805
2806 kfree(bp->strm);
2807 bp->strm = NULL;
2808
2809 if (bp->gunzip_buf) {
2810 vfree(bp->gunzip_buf);
2811 bp->gunzip_buf = NULL;
2812 }
2813 }
2814
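/* Inflate one gzip image into bp->gunzip_buf. The 10-byte gzip header
 * (plus an optional NUL-terminated FNAME field) is skipped by hand, and
 * zlib is then run in raw-deflate mode (the negative window bits passed
 * to zlib_inflateInit2), since the gzip wrapper has already been consumed.
 */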
2815 static int
2816 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2817 {
2818 int n, rc;
2819
2820 /* check gzip header */
2821 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2822 return -EINVAL;
2823
2824 n = 10;
2825
2826 #define FNAME 0x8
2827 if (zbuf[3] & FNAME)
2828 while ((zbuf[n++] != 0) && (n < len));
2829
2830 bp->strm->next_in = zbuf + n;
2831 bp->strm->avail_in = len - n;
2832 bp->strm->next_out = bp->gunzip_buf;
2833 bp->strm->avail_out = FW_BUF_SIZE;
2834
2835 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2836 if (rc != Z_OK)
2837 return rc;
2838
2839 rc = zlib_inflate(bp->strm, Z_FINISH);
2840
2841 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2842 *outbuf = bp->gunzip_buf;
2843
2844 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2845 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2846 bp->dev->name, bp->strm->msg);
2847
2848 zlib_inflateEnd(bp->strm);
2849
2850 if (rc == Z_STREAM_END)
2851 return 0;
2852
2853 return rc;
2854 }
2855
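/* RV2P instructions are 64 bits wide: each one is loaded as an
 * INSTR_HIGH/INSTR_LOW register pair and then committed to instruction
 * slot i/8 with an ADDR_CMD_RDWR write. rv2p_code_len is in bytes, hence
 * the stride of 8.
 */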
2856 static void
2857 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2858 u32 rv2p_proc)
2859 {
2860 int i;
2861 u32 val;
2862
2863
2864 for (i = 0; i < rv2p_code_len; i += 8) {
2865 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2866 rv2p_code++;
2867 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2868 rv2p_code++;
2869
2870 if (rv2p_proc == RV2P_PROC1) {
2871 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2872 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2873 }
2874 else {
2875 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2876 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2877 }
2878 }
2879
2880 /* Reset the processor; un-stalling is done later. */
2881 if (rv2p_proc == RV2P_PROC1) {
2882 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2883 }
2884 else {
2885 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2886 }
2887 }
2888
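/* Generic loader for the on-chip RISC processors: halt the CPU, copy each
 * firmware section into its scratchpad window (section addresses are in
 * the CPU's own address space, hence the rebasing against mips_view_base),
 * point the program counter at the entry address, and clear the halt bit.
 */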
2889 static int
2890 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2891 {
2892 u32 offset;
2893 u32 val;
2894 int rc;
2895
2896 /* Halt the CPU. */
2897 val = REG_RD_IND(bp, cpu_reg->mode);
2898 val |= cpu_reg->mode_value_halt;
2899 REG_WR_IND(bp, cpu_reg->mode, val);
2900 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2901
2902 /* Load the Text area. */
2903 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2904 if (fw->gz_text) {
2905 u32 text_len;
2906 void *text;
2907
2908 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2909 &text_len);
2910 if (rc)
2911 return rc;
2912
2913 fw->text = text;
2914 }
2915 if (fw->gz_text) {
2916 int j;
2917
2918 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2919 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2920 }
2921 }
2922
2923 /* Load the Data area. */
2924 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2925 if (fw->data) {
2926 int j;
2927
2928 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2929 REG_WR_IND(bp, offset, fw->data[j]);
2930 }
2931 }
2932
2933 /* Load the SBSS area. */
2934 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2935 if (fw->sbss) {
2936 int j;
2937
2938 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2939 REG_WR_IND(bp, offset, fw->sbss[j]);
2940 }
2941 }
2942
2943 /* Load the BSS area. */
2944 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2945 if (fw->bss) {
2946 int j;
2947
2948 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2949 REG_WR_IND(bp, offset, fw->bss[j]);
2950 }
2951 }
2952
2953 /* Load the Read-Only area. */
2954 offset = cpu_reg->spad_base +
2955 (fw->rodata_addr - cpu_reg->mips_view_base);
2956 if (fw->rodata) {
2957 int j;
2958
2959 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2960 REG_WR_IND(bp, offset, fw->rodata[j]);
2961 }
2962 }
2963
2964 /* Clear the pre-fetch instruction. */
2965 REG_WR_IND(bp, cpu_reg->inst, 0);
2966 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2967
2968 /* Start the CPU. */
2969 val = REG_RD_IND(bp, cpu_reg->mode);
2970 val &= ~cpu_reg->mode_value_halt;
2971 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2972 REG_WR_IND(bp, cpu_reg->mode, val);
2973
2974 return 0;
2975 }
2976
2977 static int
2978 bnx2_init_cpus(struct bnx2 *bp)
2979 {
2980 struct cpu_reg cpu_reg;
2981 struct fw_info *fw;
2982 int rc = 0;
2983 void *text;
2984 u32 text_len;
2985
2986 if ((rc = bnx2_gunzip_init(bp)) != 0)
2987 return rc;
2988
2989 /* Initialize the RV2P processor. */
2990 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2991 &text_len);
2992 if (rc)
2993 goto init_cpu_err;
2994
2995 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2996
2997 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2998 &text_len);
2999 if (rc)
3000 goto init_cpu_err;
3001
3002 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
3003
3004 /* Initialize the RX Processor. */
3005 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3006 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3007 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3008 cpu_reg.state = BNX2_RXP_CPU_STATE;
3009 cpu_reg.state_value_clear = 0xffffff;
3010 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3011 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3012 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3013 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3014 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3015 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3016 cpu_reg.mips_view_base = 0x8000000;
3017
3018 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3019 fw = &bnx2_rxp_fw_09;
3020 else
3021 fw = &bnx2_rxp_fw_06;
3022
3023 rc = load_cpu_fw(bp, &cpu_reg, fw);
3024 if (rc)
3025 goto init_cpu_err;
3026
3027 /* Initialize the TX Processor. */
3028 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3029 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3030 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3031 cpu_reg.state = BNX2_TXP_CPU_STATE;
3032 cpu_reg.state_value_clear = 0xffffff;
3033 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3034 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3035 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3036 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3037 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3038 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3039 cpu_reg.mips_view_base = 0x8000000;
3040
3041 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3042 fw = &bnx2_txp_fw_09;
3043 else
3044 fw = &bnx2_txp_fw_06;
3045
3046 rc = load_cpu_fw(bp, &cpu_reg, fw);
3047 if (rc)
3048 goto init_cpu_err;
3049
3050 /* Initialize the TX Patch-up Processor. */
3051 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3052 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3053 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3054 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3055 cpu_reg.state_value_clear = 0xffffff;
3056 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3057 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3058 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3059 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3060 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3061 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3062 cpu_reg.mips_view_base = 0x8000000;
3063
3064 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3065 fw = &bnx2_tpat_fw_09;
3066 else
3067 fw = &bnx2_tpat_fw_06;
3068
3069 rc = load_cpu_fw(bp, &cpu_reg, fw);
3070 if (rc)
3071 goto init_cpu_err;
3072
3073 /* Initialize the Completion Processor. */
3074 cpu_reg.mode = BNX2_COM_CPU_MODE;
3075 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3076 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3077 cpu_reg.state = BNX2_COM_CPU_STATE;
3078 cpu_reg.state_value_clear = 0xffffff;
3079 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3080 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3081 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3082 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3083 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3084 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3085 cpu_reg.mips_view_base = 0x8000000;
3086
3087 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3088 fw = &bnx2_com_fw_09;
3089 else
3090 fw = &bnx2_com_fw_06;
3091
3092 rc = load_cpu_fw(bp, &cpu_reg, fw);
3093 if (rc)
3094 goto init_cpu_err;
3095
3096 /* Initialize the Command Processor. */
3097 cpu_reg.mode = BNX2_CP_CPU_MODE;
3098 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3099 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3100 cpu_reg.state = BNX2_CP_CPU_STATE;
3101 cpu_reg.state_value_clear = 0xffffff;
3102 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3103 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3104 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3105 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3106 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3107 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3108 cpu_reg.mips_view_base = 0x8000000;
3109
3110 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3111 fw = &bnx2_cp_fw_09;
3112
3113 rc = load_cpu_fw(bp, &cpu_reg, fw);
3114 if (rc)
3115 goto init_cpu_err;
3116 }
3117 init_cpu_err:
3118 bnx2_gunzip_end(bp);
3119 return rc;
3120 }
3121
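/* PCI power management. D0 clears the PM state and PME status and
 * restores normal EMAC operation. D3hot optionally arms Wake-on-LAN
 * (renegotiate a 10/100 link, enable magic/ACPI packet detection and
 * all-multicast reception, keep the EMAC and RPM blocks enabled),
 * notifies the firmware, and then drops the function to D3 through
 * PM_CTRL (skipped on 5706 A0/A1 unless WoL is armed, presumably a
 * hardware erratum).
 */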
3122 static int
3123 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3124 {
3125 u16 pmcsr;
3126
3127 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3128
3129 switch (state) {
3130 case PCI_D0: {
3131 u32 val;
3132
3133 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3134 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3135 PCI_PM_CTRL_PME_STATUS);
3136
3137 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3138 /* delay required during transition out of D3hot */
3139 msleep(20);
3140
3141 val = REG_RD(bp, BNX2_EMAC_MODE);
3142 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3143 val &= ~BNX2_EMAC_MODE_MPKT;
3144 REG_WR(bp, BNX2_EMAC_MODE, val);
3145
3146 val = REG_RD(bp, BNX2_RPM_CONFIG);
3147 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3148 REG_WR(bp, BNX2_RPM_CONFIG, val);
3149 break;
3150 }
3151 case PCI_D3hot: {
3152 int i;
3153 u32 val, wol_msg;
3154
3155 if (bp->wol) {
3156 u32 advertising;
3157 u8 autoneg;
3158
3159 autoneg = bp->autoneg;
3160 advertising = bp->advertising;
3161
3162 bp->autoneg = AUTONEG_SPEED;
3163 bp->advertising = ADVERTISED_10baseT_Half |
3164 ADVERTISED_10baseT_Full |
3165 ADVERTISED_100baseT_Half |
3166 ADVERTISED_100baseT_Full |
3167 ADVERTISED_Autoneg;
3168
3169 bnx2_setup_copper_phy(bp);
3170
3171 bp->autoneg = autoneg;
3172 bp->advertising = advertising;
3173
3174 bnx2_set_mac_addr(bp);
3175
3176 val = REG_RD(bp, BNX2_EMAC_MODE);
3177
3178 /* Enable port mode. */
3179 val &= ~BNX2_EMAC_MODE_PORT;
3180 val |= BNX2_EMAC_MODE_PORT_MII |
3181 BNX2_EMAC_MODE_MPKT_RCVD |
3182 BNX2_EMAC_MODE_ACPI_RCVD |
3183 BNX2_EMAC_MODE_MPKT;
3184
3185 REG_WR(bp, BNX2_EMAC_MODE, val);
3186
3187 /* receive all multicast */
3188 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3189 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3190 0xffffffff);
3191 }
3192 REG_WR(bp, BNX2_EMAC_RX_MODE,
3193 BNX2_EMAC_RX_MODE_SORT_MODE);
3194
3195 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3196 BNX2_RPM_SORT_USER0_MC_EN;
3197 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3198 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3199 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3200 BNX2_RPM_SORT_USER0_ENA);
3201
3202 /* Need to enable EMAC and RPM for WOL. */
3203 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3204 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3205 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3206 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3207
3208 val = REG_RD(bp, BNX2_RPM_CONFIG);
3209 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3210 REG_WR(bp, BNX2_RPM_CONFIG, val);
3211
3212 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3213 }
3214 else {
3215 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3216 }
3217
3218 if (!(bp->flags & NO_WOL_FLAG))
3219 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3220
3221 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3222 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3223 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3224
3225 if (bp->wol)
3226 pmcsr |= 3;
3227 }
3228 else {
3229 pmcsr |= 3;
3230 }
3231 if (bp->wol) {
3232 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3233 }
3234 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3235 pmcsr);
3236
3237 /* No more memory access after this point until
3238 * device is brought back to D0.
3239 */
3240 udelay(50);
3241 break;
3242 }
3243 default:
3244 return -EINVAL;
3245 }
3246 return 0;
3247 }
3248
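/* The flash interface is shared with the firmware and arbitrated through
 * NVM_SW_ARB: setting ARB_REQ_SET2 requests arbitration slot 2 and the
 * grant is reflected in ARB_ARB2. All NVRAM operations below poll with
 * the same fixed NVRAM_TIMEOUT_COUNT loop.
 */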
3249 static int
3250 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3251 {
3252 u32 val;
3253 int j;
3254
3255 /* Request access to the flash interface. */
3256 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3257 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3258 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3259 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3260 break;
3261
3262 udelay(5);
3263 }
3264
3265 if (j >= NVRAM_TIMEOUT_COUNT)
3266 return -EBUSY;
3267
3268 return 0;
3269 }
3270
3271 static int
3272 bnx2_release_nvram_lock(struct bnx2 *bp)
3273 {
3274 int j;
3275 u32 val;
3276
3277 /* Relinquish nvram interface. */
3278 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3279
3280 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3281 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3282 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3283 break;
3284
3285 udelay(5);
3286 }
3287
3288 if (j >= NVRAM_TIMEOUT_COUNT)
3289 return -EBUSY;
3290
3291 return 0;
3292 }
3293
3294
3295 static int
3296 bnx2_enable_nvram_write(struct bnx2 *bp)
3297 {
3298 u32 val;
3299
3300 val = REG_RD(bp, BNX2_MISC_CFG);
3301 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3302
3303 if (bp->flash_info->flags & BNX2_NV_WREN) {
3304 int j;
3305
3306 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3307 REG_WR(bp, BNX2_NVM_COMMAND,
3308 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3309
3310 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3311 udelay(5);
3312
3313 val = REG_RD(bp, BNX2_NVM_COMMAND);
3314 if (val & BNX2_NVM_COMMAND_DONE)
3315 break;
3316 }
3317
3318 if (j >= NVRAM_TIMEOUT_COUNT)
3319 return -EBUSY;
3320 }
3321 return 0;
3322 }
3323
3324 static void
3325 bnx2_disable_nvram_write(struct bnx2 *bp)
3326 {
3327 u32 val;
3328
3329 val = REG_RD(bp, BNX2_MISC_CFG);
3330 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3331 }
3332
3333
3334 static void
3335 bnx2_enable_nvram_access(struct bnx2 *bp)
3336 {
3337 u32 val;
3338
3339 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3340 /* Enable both bits, even on read. */
3341 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3342 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3343 }
3344
3345 static void
3346 bnx2_disable_nvram_access(struct bnx2 *bp)
3347 {
3348 u32 val;
3349
3350 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3351 /* Disable both bits, even after read. */
3352 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3353 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3354 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3355 }
3356
3357 static int
3358 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3359 {
3360 u32 cmd;
3361 int j;
3362
3363 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3364 /* Buffered flash, no erase needed */
3365 return 0;
3366
3367 /* Build an erase command */
3368 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3369 BNX2_NVM_COMMAND_DOIT;
3370
3371 /* Need to clear DONE bit separately. */
3372 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3373
3374 /* Address of the NVRAM page to erase. */
3375 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3376
3377 /* Issue an erase command. */
3378 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3379
3380 /* Wait for completion. */
3381 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3382 u32 val;
3383
3384 udelay(5);
3385
3386 val = REG_RD(bp, BNX2_NVM_COMMAND);
3387 if (val & BNX2_NVM_COMMAND_DONE)
3388 break;
3389 }
3390
3391 if (j >= NVRAM_TIMEOUT_COUNT)
3392 return -EBUSY;
3393
3394 return 0;
3395 }
3396
3397 static int
3398 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3399 {
3400 u32 cmd;
3401 int j;
3402
3403 /* Build the command word. */
3404 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3405
3406 /* Translate the offset for buffered flash; not needed for 5709. */
3407 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3408 offset = ((offset / bp->flash_info->page_size) <<
3409 bp->flash_info->page_bits) +
3410 (offset % bp->flash_info->page_size);
3411 }
3412
3413 /* Need to clear DONE bit separately. */
3414 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3415
3416 /* Address of the NVRAM to read from. */
3417 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3418
3419 /* Issue a read command. */
3420 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3421
3422 /* Wait for completion. */
3423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424 u32 val;
3425
3426 udelay(5);
3427
3428 val = REG_RD(bp, BNX2_NVM_COMMAND);
3429 if (val & BNX2_NVM_COMMAND_DONE) {
3430 val = REG_RD(bp, BNX2_NVM_READ);
3431
3432 val = be32_to_cpu(val);
3433 memcpy(ret_val, &val, 4);
3434 break;
3435 }
3436 }
3437 if (j >= NVRAM_TIMEOUT_COUNT)
3438 return -EBUSY;
3439
3440 return 0;
3441 }
3442
3443
3444 static int
3445 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3446 {
3447 u32 cmd, val32;
3448 int j;
3449
3450 /* Build the command word. */
3451 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3452
3453 /* Translate the offset for buffered flash; not needed for 5709. */
3454 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3455 offset = ((offset / bp->flash_info->page_size) <<
3456 bp->flash_info->page_bits) +
3457 (offset % bp->flash_info->page_size);
3458 }
3459
3460 /* Need to clear DONE bit separately. */
3461 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3462
3463 memcpy(&val32, val, 4);
3464 val32 = cpu_to_be32(val32);
3465
3466 /* Write the data. */
3467 REG_WR(bp, BNX2_NVM_WRITE, val32);
3468
3469 /* Address of the NVRAM to write to. */
3470 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3471
3472 /* Issue the write command. */
3473 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3474
3475 /* Wait for completion. */
3476 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3477 udelay(5);
3478
3479 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3480 break;
3481 }
3482 if (j >= NVRAM_TIMEOUT_COUNT)
3483 return -EBUSY;
3484
3485 return 0;
3486 }
3487
3488 static int
3489 bnx2_init_nvram(struct bnx2 *bp)
3490 {
3491 u32 val;
3492 int j, entry_count, rc = 0;
3493 struct flash_spec *flash;
3494
3495 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3496 bp->flash_info = &flash_5709;
3497 goto get_flash_size;
3498 }
3499
3500 /* Determine the selected interface. */
3501 val = REG_RD(bp, BNX2_NVM_CFG1);
3502
3503 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3504
3505 if (val & 0x40000000) {
3506
3507 /* Flash interface has been reconfigured */
3508 for (j = 0, flash = &flash_table[0]; j < entry_count;
3509 j++, flash++) {
3510 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3511 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3512 bp->flash_info = flash;
3513 break;
3514 }
3515 }
3516 }
3517 else {
3518 u32 mask;
3519 /* Not yet reconfigured */
3520
3521 if (val & (1 << 23))
3522 mask = FLASH_BACKUP_STRAP_MASK;
3523 else
3524 mask = FLASH_STRAP_MASK;
3525
3526 for (j = 0, flash = &flash_table[0]; j < entry_count;
3527 j++, flash++) {
3528
3529 if ((val & mask) == (flash->strapping & mask)) {
3530 bp->flash_info = flash;
3531
3532 /* Request access to the flash interface. */
3533 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3534 return rc;
3535
3536 /* Enable access to flash interface */
3537 bnx2_enable_nvram_access(bp);
3538
3539 /* Reconfigure the flash interface */
3540 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3541 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3542 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3543 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3544
3545 /* Disable access to flash interface */
3546 bnx2_disable_nvram_access(bp);
3547 bnx2_release_nvram_lock(bp);
3548
3549 break;
3550 }
3551 }
3552 } /* if (val & 0x40000000) */
3553
3554 if (j == entry_count) {
3555 bp->flash_info = NULL;
3556 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3557 return -ENODEV;
3558 }
3559
3560 get_flash_size:
3561 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3562 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3563 if (val)
3564 bp->flash_size = val;
3565 else
3566 bp->flash_size = bp->flash_info->total_size;
3567
3568 return rc;
3569 }
3570
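/* NVRAM reads go over the wire as aligned 32-bit words. An unaligned head
 * or tail is handled by reading the surrounding dword into a scratch
 * buffer and copying out only the requested bytes; the FIRST/LAST command
 * flags bracket the burst for the flash controller.
 */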
3571 static int
3572 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3573 int buf_size)
3574 {
3575 int rc = 0;
3576 u32 cmd_flags, offset32, len32, extra;
3577
3578 if (buf_size == 0)
3579 return 0;
3580
3581 /* Request access to the flash interface. */
3582 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3583 return rc;
3584
3585 /* Enable access to flash interface */
3586 bnx2_enable_nvram_access(bp);
3587
3588 len32 = buf_size;
3589 offset32 = offset;
3590 extra = 0;
3591
3592 cmd_flags = 0;
3593
3594 if (offset32 & 3) {
3595 u8 buf[4];
3596 u32 pre_len;
3597
3598 offset32 &= ~3;
3599 pre_len = 4 - (offset & 3);
3600
3601 if (pre_len >= len32) {
3602 pre_len = len32;
3603 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3604 BNX2_NVM_COMMAND_LAST;
3605 }
3606 else {
3607 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3608 }
3609
3610 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3611
3612 if (rc)
3613 return rc;
3614
3615 memcpy(ret_buf, buf + (offset & 3), pre_len);
3616
3617 offset32 += 4;
3618 ret_buf += pre_len;
3619 len32 -= pre_len;
3620 }
3621 if (len32 & 3) {
3622 extra = 4 - (len32 & 3);
3623 len32 = (len32 + 4) & ~3;
3624 }
3625
3626 if (len32 == 4) {
3627 u8 buf[4];
3628
3629 if (cmd_flags)
3630 cmd_flags = BNX2_NVM_COMMAND_LAST;
3631 else
3632 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3633 BNX2_NVM_COMMAND_LAST;
3634
3635 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3636
3637 memcpy(ret_buf, buf, 4 - extra);
3638 }
3639 else if (len32 > 0) {
3640 u8 buf[4];
3641
3642 /* Read the first word. */
3643 if (cmd_flags)
3644 cmd_flags = 0;
3645 else
3646 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3647
3648 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3649
3650 /* Advance to the next dword. */
3651 offset32 += 4;
3652 ret_buf += 4;
3653 len32 -= 4;
3654
3655 while (len32 > 4 && rc == 0) {
3656 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3657
3658 /* Advance to the next dword. */
3659 offset32 += 4;
3660 ret_buf += 4;
3661 len32 -= 4;
3662 }
3663
3664 if (rc)
3665 return rc;
3666
3667 cmd_flags = BNX2_NVM_COMMAND_LAST;
3668 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3669
3670 memcpy(ret_buf, buf, 4 - extra);
3671 }
3672
3673 /* Disable access to flash interface */
3674 bnx2_disable_nvram_access(bp);
3675
3676 bnx2_release_nvram_lock(bp);
3677
3678 return rc;
3679 }
3680
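/* NVRAM writes are a page-wise read-modify-write. For non-buffered flash,
 * the whole page is first read into flash_buffer, the page is erased, and
 * then the untouched leading bytes, the new data, and the untouched
 * trailing bytes are written back in that order. Unaligned head and tail
 * bytes are merged in via the start[]/end[] dwords read up front.
 */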
3681 static int
3682 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3683 int buf_size)
3684 {
3685 u32 written, offset32, len32;
3686 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3687 int rc = 0;
3688 int align_start, align_end;
3689
3690 buf = data_buf;
3691 offset32 = offset;
3692 len32 = buf_size;
3693 align_start = align_end = 0;
3694
3695 if ((align_start = (offset32 & 3))) {
3696 offset32 &= ~3;
3697 len32 += align_start;
3698 if (len32 < 4)
3699 len32 = 4;
3700 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3701 return rc;
3702 }
3703
3704 if (len32 & 3) {
3705 align_end = 4 - (len32 & 3);
3706 len32 += align_end;
3707 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3708 return rc;
3709 }
3710
3711 if (align_start || align_end) {
3712 align_buf = kmalloc(len32, GFP_KERNEL);
3713 if (align_buf == NULL)
3714 return -ENOMEM;
3715 if (align_start) {
3716 memcpy(align_buf, start, 4);
3717 }
3718 if (align_end) {
3719 memcpy(align_buf + len32 - 4, end, 4);
3720 }
3721 memcpy(align_buf + align_start, data_buf, buf_size);
3722 buf = align_buf;
3723 }
3724
3725 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3726 flash_buffer = kmalloc(264, GFP_KERNEL);
3727 if (flash_buffer == NULL) {
3728 rc = -ENOMEM;
3729 goto nvram_write_end;
3730 }
3731 }
3732
3733 written = 0;
3734 while ((written < len32) && (rc == 0)) {
3735 u32 page_start, page_end, data_start, data_end;
3736 u32 addr, cmd_flags;
3737 int i;
3738
3739 /* Find the page_start addr */
3740 page_start = offset32 + written;
3741 page_start -= (page_start % bp->flash_info->page_size);
3742 /* Find the page_end addr */
3743 page_end = page_start + bp->flash_info->page_size;
3744 /* Find the data_start addr */
3745 data_start = (written == 0) ? offset32 : page_start;
3746 /* Find the data_end addr */
3747 data_end = (page_end > offset32 + len32) ?
3748 (offset32 + len32) : page_end;
3749
3750 /* Request access to the flash interface. */
3751 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752 goto nvram_write_end;
3753
3754 /* Enable access to flash interface */
3755 bnx2_enable_nvram_access(bp);
3756
3757 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3758 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3759 int j;
3760
3761 /* Read the whole page into the buffer
3762 * (non-buffered flash only) */
3763 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3764 if (j == (bp->flash_info->page_size - 4)) {
3765 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3766 }
3767 rc = bnx2_nvram_read_dword(bp,
3768 page_start + j,
3769 &flash_buffer[j],
3770 cmd_flags);
3771
3772 if (rc)
3773 goto nvram_write_end;
3774
3775 cmd_flags = 0;
3776 }
3777 }
3778
3779 /* Enable writes to flash interface (unlock write-protect) */
3780 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3781 goto nvram_write_end;
3782
3783 /* Loop to write back the buffer data from page_start to
3784 * data_start */
3785 i = 0;
3786 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3787 /* Erase the page */
3788 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3789 goto nvram_write_end;
3790
3791 /* Re-enable writes for the actual data write */
3792 bnx2_enable_nvram_write(bp);
3793
3794 for (addr = page_start; addr < data_start;
3795 addr += 4, i += 4) {
3796
3797 rc = bnx2_nvram_write_dword(bp, addr,
3798 &flash_buffer[i], cmd_flags);
3799
3800 if (rc != 0)
3801 goto nvram_write_end;
3802
3803 cmd_flags = 0;
3804 }
3805 }
3806
3807 /* Loop to write the new data from data_start to data_end */
3808 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3809 if ((addr == page_end - 4) ||
3810 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3811 (addr == data_end - 4))) {
3812
3813 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3814 }
3815 rc = bnx2_nvram_write_dword(bp, addr, buf,
3816 cmd_flags);
3817
3818 if (rc != 0)
3819 goto nvram_write_end;
3820
3821 cmd_flags = 0;
3822 buf += 4;
3823 }
3824
3825 /* Loop to write back the buffer data from data_end
3826 * to page_end */
3827 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3828 for (addr = data_end; addr < page_end;
3829 addr += 4, i += 4) {
3830
3831 if (addr == page_end-4) {
3832 cmd_flags = BNX2_NVM_COMMAND_LAST;
3833 }
3834 rc = bnx2_nvram_write_dword(bp, addr,
3835 &flash_buffer[i], cmd_flags);
3836
3837 if (rc != 0)
3838 goto nvram_write_end;
3839
3840 cmd_flags = 0;
3841 }
3842 }
3843
3844 /* Disable writes to flash interface (lock write-protect) */
3845 bnx2_disable_nvram_write(bp);
3846
3847 /* Disable access to flash interface */
3848 bnx2_disable_nvram_access(bp);
3849 bnx2_release_nvram_lock(bp);
3850
3851 /* Increment written */
3852 written += data_end - data_start;
3853 }
3854
3855 nvram_write_end:
3856 kfree(flash_buffer);
3857 kfree(align_buf);
3858 return rc;
3859 }
3860
3861 static void
3862 bnx2_init_remote_phy(struct bnx2 *bp)
3863 {
3864 u32 val;
3865
3866 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3867 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3868 return;
3869
3870 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3871 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3872 return;
3873
3874 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3875 if (netif_running(bp->dev)) {
3876 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3877 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3878 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3879 val);
3880 }
3881 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3882
3883 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3884 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3885 bp->phy_port = PORT_FIBRE;
3886 else
3887 bp->phy_port = PORT_TP;
3888 }
3889 }
3890
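/* Chip reset sequence: quiesce DMA, ask the firmware for permission with
 * a WAIT0 sync, deposit a soft-reset signature in shared memory, issue
 * the core reset (MISC_COMMAND on the 5709, CORE_RST_REQ through
 * PCICFG_MISC_CONFIG elsewhere), wait for the firmware with a WAIT1 sync,
 * and finally verify byte swapping against the 0x01020304 signature in
 * PCI_SWAP_DIAG0.
 */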
3891 static int
3892 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3893 {
3894 u32 val;
3895 int i, rc = 0;
3896
3897 /* Wait for the current PCI transaction to complete before
3898 * issuing a reset. */
3899 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3900 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3901 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3902 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3903 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3904 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3905 udelay(5);
3906
3907 /* Wait for the firmware to tell us it is ok to issue a reset. */
3908 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3909
3910 /* Deposit a driver reset signature so the firmware knows that
3911 * this is a soft reset. */
3912 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3913 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3914
3915 /* Do a dummy read to force the chip to complete all outstanding
3916 * transactions before we issue a reset. */
3917 val = REG_RD(bp, BNX2_MISC_ID);
3918
3919 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3920 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3921 REG_RD(bp, BNX2_MISC_COMMAND);
3922 udelay(5);
3923
3924 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3925 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3926
3927 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3928
3929 } else {
3930 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3931 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3932 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3933
3934 /* Chip reset. */
3935 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3936
3937 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3938 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3939 msleep(20);
3940
3941 /* Reset takes approximately 30 usec */
3942 for (i = 0; i < 10; i++) {
3943 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3944 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3945 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3946 break;
3947 udelay(10);
3948 }
3949
3950 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3951 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3952 printk(KERN_ERR PFX "Chip reset did not complete\n");
3953 return -EBUSY;
3954 }
3955 }
3956
3957 /* Make sure byte swapping is properly configured. */
3958 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3959 if (val != 0x01020304) {
3960 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3961 return -ENODEV;
3962 }
3963
3964 /* Wait for the firmware to finish its initialization. */
3965 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3966 if (rc)
3967 return rc;
3968
3969 spin_lock_bh(&bp->phy_lock);
3970 bnx2_init_remote_phy(bp);
3971 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3972 bnx2_set_default_remote_link(bp);
3973 spin_unlock_bh(&bp->phy_lock);
3974
3975 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3976 /* Adjust the voltage regulator two steps lower. The default
3977 * value of this register is 0x0000000e. */
3978 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3979
3980 /* Remove bad rbuf memory from the free pool. */
3981 rc = bnx2_alloc_bad_rbuf(bp);
3982 }
3983
3984 return rc;
3985 }
3986
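/* Post-reset bring-up: program the DMA byte/word swapping for the host
 * endianness, initialize context memory, load the internal CPU firmware,
 * probe the flash, program the MAC address, MTU and backoff seed, hand
 * the host coalescing block its status/statistics DMA addresses and tick
 * values, set the rx filter, and let the firmware know with a WAIT2 sync
 * before enabling the whole chip.
 */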
3987 static int
3988 bnx2_init_chip(struct bnx2 *bp)
3989 {
3990 u32 val;
3991 int rc;
3992
3993 /* Make sure the interrupt is not active. */
3994 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3995
3996 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3997 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3998 #ifdef __BIG_ENDIAN
3999 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4000 #endif
4001 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4002 DMA_READ_CHANS << 12 |
4003 DMA_WRITE_CHANS << 16;
4004
4005 val |= (0x2 << 20) | (1 << 11);
4006
4007 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4008 val |= (1 << 23);
4009
4010 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4011 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4012 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4013
4014 REG_WR(bp, BNX2_DMA_CONFIG, val);
4015
4016 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4017 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4018 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4019 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4020 }
4021
4022 if (bp->flags & PCIX_FLAG) {
4023 u16 val16;
4024
4025 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4026 &val16);
4027 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4028 val16 & ~PCI_X_CMD_ERO);
4029 }
4030
4031 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4032 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4033 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4034 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4035
4036 /* Initialize context mapping and zero out the quick contexts. The
4037 * context block must have already been enabled. */
4038 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4039 rc = bnx2_init_5709_context(bp);
4040 if (rc)
4041 return rc;
4042 } else
4043 bnx2_init_context(bp);
4044
4045 if ((rc = bnx2_init_cpus(bp)) != 0)
4046 return rc;
4047
4048 bnx2_init_nvram(bp);
4049
4050 bnx2_set_mac_addr(bp);
4051
4052 val = REG_RD(bp, BNX2_MQ_CONFIG);
4053 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4054 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4055 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4056 val |= BNX2_MQ_CONFIG_HALT_DIS;
4057
4058 REG_WR(bp, BNX2_MQ_CONFIG, val);
4059
4060 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4061 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4062 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4063
4064 val = (BCM_PAGE_BITS - 8) << 24;
4065 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4066
4067 /* Configure page size. */
4068 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4069 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4070 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4071 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4072
4073 val = bp->mac_addr[0] +
4074 (bp->mac_addr[1] << 8) +
4075 (bp->mac_addr[2] << 16) +
4076 bp->mac_addr[3] +
4077 (bp->mac_addr[4] << 8) +
4078 (bp->mac_addr[5] << 16);
4079 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4080
4081 /* Program the MTU. Also include 4 bytes for CRC32. */
4082 val = bp->dev->mtu + ETH_HLEN + 4;
4083 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4084 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4085 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4086
4087 bp->last_status_idx = 0;
4088 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4089
4090 /* Set up how to generate a link change interrupt. */
4091 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4092
4093 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4094 (u64) bp->status_blk_mapping & 0xffffffff);
4095 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4096
4097 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4098 (u64) bp->stats_blk_mapping & 0xffffffff);
4099 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4100 (u64) bp->stats_blk_mapping >> 32);
4101
4102 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4103 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4104
4105 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4106 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4107
4108 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4109 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4110
4111 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4112
4113 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4114
4115 REG_WR(bp, BNX2_HC_COM_TICKS,
4116 (bp->com_ticks_int << 16) | bp->com_ticks);
4117
4118 REG_WR(bp, BNX2_HC_CMD_TICKS,
4119 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4120
4121 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4122 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4123 else
4124 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4125 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4126
4127 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4128 val = BNX2_HC_CONFIG_COLLECT_STATS;
4129 else {
4130 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4131 BNX2_HC_CONFIG_COLLECT_STATS;
4132 }
4133
4134 if (bp->flags & ONE_SHOT_MSI_FLAG)
4135 val |= BNX2_HC_CONFIG_ONE_SHOT;
4136
4137 REG_WR(bp, BNX2_HC_CONFIG, val);
4138
4139 /* Clear internal stats counters. */
4140 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4141
4142 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4143
4144 /* Initialize the receive filter. */
4145 bnx2_set_rx_mode(bp->dev);
4146
4147 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4148 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4149 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4150 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4151 }
4152 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4153 0);
4154
4155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4156 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4157
4158 udelay(20);
4159
4160 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4161
4162 return rc;
4163 }
4164
4165 static void
4166 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4167 {
4168 u32 val, offset0, offset1, offset2, offset3;
4169
4170 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4171 offset0 = BNX2_L2CTX_TYPE_XI;
4172 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4173 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4174 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4175 } else {
4176 offset0 = BNX2_L2CTX_TYPE;
4177 offset1 = BNX2_L2CTX_CMD_TYPE;
4178 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4179 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4180 }
4181 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4182 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4183
4184 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4185 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4186
4187 val = (u64) bp->tx_desc_mapping >> 32;
4188 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4189
4190 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4191 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4192 }
4193
4194 static void
4195 bnx2_init_tx_ring(struct bnx2 *bp)
4196 {
4197 struct tx_bd *txbd;
4198 u32 cid;
4199
4200 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4201
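	/* The last BD in the ring is a chain BD pointing back to the
	 * start of the ring, making the descriptor chain circular.
	 */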
4202 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4203
4204 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4205 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4206
4207 bp->tx_prod = 0;
4208 bp->tx_cons = 0;
4209 bp->hw_tx_cons = 0;
4210 bp->tx_prod_bseq = 0;
4211
4212 cid = TX_CID;
4213 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4214 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4215
4216 bnx2_init_tx_context(bp, cid);
4217 }
4218
4219 static void
4220 bnx2_init_rx_ring(struct bnx2 *bp)
4221 {
4222 struct rx_bd *rxbd;
4223 int i;
4224 u16 prod, ring_prod;
4225 u32 val;
4226
4227 	/* 8 bytes for the CRC and VLAN tag */
4228 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4229 /* hw alignment */
4230 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4231
4232 ring_prod = prod = bp->rx_prod = 0;
4233 bp->rx_cons = 0;
4234 bp->hw_rx_cons = 0;
4235 bp->rx_prod_bseq = 0;
4236
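	/* Chain the rx pages together: the last BD of each page points
	 * to the next page, and the last page points back to the first.
	 */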
4237 for (i = 0; i < bp->rx_max_ring; i++) {
4238 int j;
4239
4240 rxbd = &bp->rx_desc_ring[i][0];
4241 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4242 rxbd->rx_bd_len = bp->rx_buf_use_size;
4243 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4244 }
4245 if (i == (bp->rx_max_ring - 1))
4246 j = 0;
4247 else
4248 j = i + 1;
4249 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4250 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4251 0xffffffff;
4252 }
4253
4254 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4255 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4256 val |= 0x02 << 8;
4257 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4258
4259 val = (u64) bp->rx_desc_mapping[0] >> 32;
4260 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4261
4262 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4263 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4264
4265 for (i = 0; i < bp->rx_ring_size; i++) {
4266 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4267 break;
4268 }
4269 prod = NEXT_RX_BD(prod);
4270 ring_prod = RX_RING_IDX(prod);
4271 }
4272 bp->rx_prod = prod;
4273
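	/* Publish the initial producer index and byte sequence to the
	 * chip through the rx context mailbox.
	 */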
4274 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4275
4276 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4277 }
4278
4279 static void
4280 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4281 {
4282 u32 num_rings, max;
4283
4284 bp->rx_ring_size = size;
4285 num_rings = 1;
4286 while (size > MAX_RX_DESC_CNT) {
4287 size -= MAX_RX_DESC_CNT;
4288 num_rings++;
4289 }
4290 	/* round num_rings up to the next power of 2 */
4291 max = MAX_RX_RINGS;
4292 while ((max & num_rings) == 0)
4293 max >>= 1;
4294
4295 if (num_rings != max)
4296 max <<= 1;
4297
4298 bp->rx_max_ring = max;
4299 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4300 }
4301
4302 static void
4303 bnx2_free_tx_skbs(struct bnx2 *bp)
4304 {
4305 int i;
4306
4307 if (bp->tx_buf_ring == NULL)
4308 return;
4309
4310 for (i = 0; i < TX_DESC_CNT; ) {
4311 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4312 struct sk_buff *skb = tx_buf->skb;
4313 int j, last;
4314
4315 if (skb == NULL) {
4316 i++;
4317 continue;
4318 }
4319
4320 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4321 skb_headlen(skb), PCI_DMA_TODEVICE);
4322
4323 tx_buf->skb = NULL;
4324
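		/* A packet occupies one BD for the linear data plus one
		 * BD per page fragment; unmap all of them before
		 * freeing the skb.
		 */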
4325 last = skb_shinfo(skb)->nr_frags;
4326 for (j = 0; j < last; j++) {
4327 tx_buf = &bp->tx_buf_ring[i + j + 1];
4328 pci_unmap_page(bp->pdev,
4329 pci_unmap_addr(tx_buf, mapping),
4330 skb_shinfo(skb)->frags[j].size,
4331 PCI_DMA_TODEVICE);
4332 }
4333 dev_kfree_skb(skb);
4334 i += j + 1;
4335 }
4336
4337 }
4338
4339 static void
4340 bnx2_free_rx_skbs(struct bnx2 *bp)
4341 {
4342 int i;
4343
4344 if (bp->rx_buf_ring == NULL)
4345 return;
4346
4347 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4348 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4349 struct sk_buff *skb = rx_buf->skb;
4350
4351 if (skb == NULL)
4352 continue;
4353
4354 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4355 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4356
4357 rx_buf->skb = NULL;
4358
4359 dev_kfree_skb(skb);
4360 }
4361 }
4362
4363 static void
4364 bnx2_free_skbs(struct bnx2 *bp)
4365 {
4366 bnx2_free_tx_skbs(bp);
4367 bnx2_free_rx_skbs(bp);
4368 }
4369
4370 static int
4371 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4372 {
4373 int rc;
4374
4375 rc = bnx2_reset_chip(bp, reset_code);
4376 bnx2_free_skbs(bp);
4377 if (rc)
4378 return rc;
4379
4380 if ((rc = bnx2_init_chip(bp)) != 0)
4381 return rc;
4382
4383 bnx2_init_tx_ring(bp);
4384 bnx2_init_rx_ring(bp);
4385 return 0;
4386 }
4387
4388 static int
4389 bnx2_init_nic(struct bnx2 *bp)
4390 {
4391 int rc;
4392
4393 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4394 return rc;
4395
4396 spin_lock_bh(&bp->phy_lock);
4397 bnx2_init_phy(bp);
4398 bnx2_set_link(bp);
4399 spin_unlock_bh(&bp->phy_lock);
4400 return 0;
4401 }
4402
4403 static int
4404 bnx2_test_registers(struct bnx2 *bp)
4405 {
4406 int ret;
4407 int i, is_5709;
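	/* Each entry lists a register offset, per-chip flags, a mask of
	 * read/write bits that must store whatever is written, and a
	 * mask of read-only bits that must keep their value.
	 */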
4408 static const struct {
4409 u16 offset;
4410 u16 flags;
4411 #define BNX2_FL_NOT_5709 1
4412 u32 rw_mask;
4413 u32 ro_mask;
4414 } reg_tbl[] = {
4415 { 0x006c, 0, 0x00000000, 0x0000003f },
4416 { 0x0090, 0, 0xffffffff, 0x00000000 },
4417 { 0x0094, 0, 0x00000000, 0x00000000 },
4418
4419 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4420 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4421 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4422 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4423 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4424 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4425 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4426 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4427 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4428
4429 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4430 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4431 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4432 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4433 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4434 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4435
4436 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4437 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4438 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4439
4440 { 0x1000, 0, 0x00000000, 0x00000001 },
4441 { 0x1004, 0, 0x00000000, 0x000f0001 },
4442
4443 { 0x1408, 0, 0x01c00800, 0x00000000 },
4444 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4445 { 0x14a8, 0, 0x00000000, 0x000001ff },
4446 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4447 { 0x14b0, 0, 0x00000002, 0x00000001 },
4448 { 0x14b8, 0, 0x00000000, 0x00000000 },
4449 { 0x14c0, 0, 0x00000000, 0x00000009 },
4450 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4451 { 0x14cc, 0, 0x00000000, 0x00000001 },
4452 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4453
4454 { 0x1800, 0, 0x00000000, 0x00000001 },
4455 { 0x1804, 0, 0x00000000, 0x00000003 },
4456
4457 { 0x2800, 0, 0x00000000, 0x00000001 },
4458 { 0x2804, 0, 0x00000000, 0x00003f01 },
4459 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4460 { 0x2810, 0, 0xffff0000, 0x00000000 },
4461 { 0x2814, 0, 0xffff0000, 0x00000000 },
4462 { 0x2818, 0, 0xffff0000, 0x00000000 },
4463 { 0x281c, 0, 0xffff0000, 0x00000000 },
4464 { 0x2834, 0, 0xffffffff, 0x00000000 },
4465 { 0x2840, 0, 0x00000000, 0xffffffff },
4466 { 0x2844, 0, 0x00000000, 0xffffffff },
4467 { 0x2848, 0, 0xffffffff, 0x00000000 },
4468 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4469
4470 { 0x2c00, 0, 0x00000000, 0x00000011 },
4471 { 0x2c04, 0, 0x00000000, 0x00030007 },
4472
4473 { 0x3c00, 0, 0x00000000, 0x00000001 },
4474 { 0x3c04, 0, 0x00000000, 0x00070000 },
4475 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4476 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4477 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4478 { 0x3c14, 0, 0x00000000, 0xffffffff },
4479 { 0x3c18, 0, 0x00000000, 0xffffffff },
4480 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4481 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4482
4483 { 0x5004, 0, 0x00000000, 0x0000007f },
4484 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4485
4486 { 0x5c00, 0, 0x00000000, 0x00000001 },
4487 { 0x5c04, 0, 0x00000000, 0x0003000f },
4488 { 0x5c08, 0, 0x00000003, 0x00000000 },
4489 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4490 { 0x5c10, 0, 0x00000000, 0xffffffff },
4491 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4492 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4493 { 0x5c88, 0, 0x00000000, 0x00077373 },
4494 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4495
4496 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4497 { 0x680c, 0, 0xffffffff, 0x00000000 },
4498 { 0x6810, 0, 0xffffffff, 0x00000000 },
4499 { 0x6814, 0, 0xffffffff, 0x00000000 },
4500 { 0x6818, 0, 0xffffffff, 0x00000000 },
4501 { 0x681c, 0, 0xffffffff, 0x00000000 },
4502 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4503 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4504 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4505 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4506 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4507 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4508 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4509 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4510 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4511 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4512 { 0x684c, 0, 0xffffffff, 0x00000000 },
4513 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4514 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4515 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4516 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4517 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4518 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4519
4520 { 0xffff, 0, 0x00000000, 0x00000000 },
4521 };
4522
4523 ret = 0;
4524 is_5709 = 0;
4525 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4526 is_5709 = 1;
4527
4528 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4529 u32 offset, rw_mask, ro_mask, save_val, val;
4530 u16 flags = reg_tbl[i].flags;
4531
4532 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4533 continue;
4534
4535 offset = (u32) reg_tbl[i].offset;
4536 rw_mask = reg_tbl[i].rw_mask;
4537 ro_mask = reg_tbl[i].ro_mask;
4538
4539 save_val = readl(bp->regview + offset);
4540
4541 writel(0, bp->regview + offset);
4542
4543 val = readl(bp->regview + offset);
4544 if ((val & rw_mask) != 0) {
4545 goto reg_test_err;
4546 }
4547
4548 if ((val & ro_mask) != (save_val & ro_mask)) {
4549 goto reg_test_err;
4550 }
4551
4552 writel(0xffffffff, bp->regview + offset);
4553
4554 val = readl(bp->regview + offset);
4555 if ((val & rw_mask) != rw_mask) {
4556 goto reg_test_err;
4557 }
4558
4559 if ((val & ro_mask) != (save_val & ro_mask)) {
4560 goto reg_test_err;
4561 }
4562
4563 writel(save_val, bp->regview + offset);
4564 continue;
4565
4566 reg_test_err:
4567 writel(save_val, bp->regview + offset);
4568 ret = -ENODEV;
4569 break;
4570 }
4571 return ret;
4572 }
4573
4574 static int
4575 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4576 {
4577 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4578 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4579 int i;
4580
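	/* Walk every test pattern across the whole region through the
	 * indirect register window, verifying each write by reading it
	 * back.
	 */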
4581 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4582 u32 offset;
4583
4584 for (offset = 0; offset < size; offset += 4) {
4585
4586 REG_WR_IND(bp, start + offset, test_pattern[i]);
4587
4588 if (REG_RD_IND(bp, start + offset) !=
4589 test_pattern[i]) {
4590 return -ENODEV;
4591 }
4592 }
4593 }
4594 return 0;
4595 }
4596
4597 static int
4598 bnx2_test_memory(struct bnx2 *bp)
4599 {
4600 int ret = 0;
4601 int i;
4602 static struct mem_entry {
4603 u32 offset;
4604 u32 len;
4605 } mem_tbl_5706[] = {
4606 { 0x60000, 0x4000 },
4607 { 0xa0000, 0x3000 },
4608 { 0xe0000, 0x4000 },
4609 { 0x120000, 0x4000 },
4610 { 0x1a0000, 0x4000 },
4611 { 0x160000, 0x4000 },
4612 { 0xffffffff, 0 },
4613 },
4614 mem_tbl_5709[] = {
4615 { 0x60000, 0x4000 },
4616 { 0xa0000, 0x3000 },
4617 { 0xe0000, 0x4000 },
4618 { 0x120000, 0x4000 },
4619 { 0x1a0000, 0x4000 },
4620 { 0xffffffff, 0 },
4621 };
4622 struct mem_entry *mem_tbl;
4623
4624 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4625 mem_tbl = mem_tbl_5709;
4626 else
4627 mem_tbl = mem_tbl_5706;
4628
4629 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4630 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4631 mem_tbl[i].len)) != 0) {
4632 return ret;
4633 }
4634 }
4635
4636 return ret;
4637 }
4638
4639 #define BNX2_MAC_LOOPBACK 0
4640 #define BNX2_PHY_LOOPBACK 1
4641
4642 static int
4643 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4644 {
4645 unsigned int pkt_size, num_pkts, i;
4646 struct sk_buff *skb, *rx_skb;
4647 unsigned char *packet;
4648 u16 rx_start_idx, rx_idx;
4649 dma_addr_t map;
4650 struct tx_bd *txbd;
4651 struct sw_bd *rx_buf;
4652 struct l2_fhdr *rx_hdr;
4653 int ret = -ENODEV;
4654
4655 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4656 bp->loopback = MAC_LOOPBACK;
4657 bnx2_set_mac_loopback(bp);
4658 }
4659 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4660 bp->loopback = PHY_LOOPBACK;
4661 bnx2_set_phy_loopback(bp);
4662 }
4663 else
4664 return -EINVAL;
4665
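	/* Build a maximum-size test frame addressed to our own MAC (so
	 * the loopback path will accept it) and fill the payload with a
	 * counting pattern that can be verified on receive.
	 */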
4666 pkt_size = 1514;
4667 skb = netdev_alloc_skb(bp->dev, pkt_size);
4668 if (!skb)
4669 return -ENOMEM;
4670 packet = skb_put(skb, pkt_size);
4671 memcpy(packet, bp->dev->dev_addr, 6);
4672 memset(packet + 6, 0x0, 8);
4673 for (i = 14; i < pkt_size; i++)
4674 packet[i] = (unsigned char) (i & 0xff);
4675
4676 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4677 PCI_DMA_TODEVICE);
4678
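	/* Force a coalescing cycle without raising an interrupt so the
	 * status block indices are current before we sample them.
	 */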
4679 REG_WR(bp, BNX2_HC_COMMAND,
4680 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4681
4682 REG_RD(bp, BNX2_HC_COMMAND);
4683
4684 udelay(5);
4685 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4686
4687 num_pkts = 0;
4688
4689 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4690
4691 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4692 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4693 txbd->tx_bd_mss_nbytes = pkt_size;
4694 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4695
4696 num_pkts++;
4697 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4698 bp->tx_prod_bseq += pkt_size;
4699
4700 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4701 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4702
4703 udelay(100);
4704
4705 REG_WR(bp, BNX2_HC_COMMAND,
4706 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4707
4708 REG_RD(bp, BNX2_HC_COMMAND);
4709
4710 udelay(5);
4711
4712 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4713 dev_kfree_skb(skb);
4714
4715 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4716 goto loopback_test_done;
4717 }
4718
4719 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4720 if (rx_idx != rx_start_idx + num_pkts) {
4721 goto loopback_test_done;
4722 }
4723
4724 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4725 rx_skb = rx_buf->skb;
4726
4727 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4728 skb_reserve(rx_skb, bp->rx_offset);
4729
4730 pci_dma_sync_single_for_cpu(bp->pdev,
4731 pci_unmap_addr(rx_buf, mapping),
4732 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4733
4734 if (rx_hdr->l2_fhdr_status &
4735 (L2_FHDR_ERRORS_BAD_CRC |
4736 L2_FHDR_ERRORS_PHY_DECODE |
4737 L2_FHDR_ERRORS_ALIGNMENT |
4738 L2_FHDR_ERRORS_TOO_SHORT |
4739 L2_FHDR_ERRORS_GIANT_FRAME)) {
4740
4741 goto loopback_test_done;
4742 }
4743
4744 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4745 goto loopback_test_done;
4746 }
4747
4748 for (i = 14; i < pkt_size; i++) {
4749 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4750 goto loopback_test_done;
4751 }
4752 }
4753
4754 ret = 0;
4755
4756 loopback_test_done:
4757 bp->loopback = 0;
4758 return ret;
4759 }
4760
4761 #define BNX2_MAC_LOOPBACK_FAILED 1
4762 #define BNX2_PHY_LOOPBACK_FAILED 2
4763 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4764 BNX2_PHY_LOOPBACK_FAILED)
4765
4766 static int
4767 bnx2_test_loopback(struct bnx2 *bp)
4768 {
4769 int rc = 0;
4770
4771 if (!netif_running(bp->dev))
4772 return BNX2_LOOPBACK_FAILED;
4773
4774 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4775 spin_lock_bh(&bp->phy_lock);
4776 bnx2_init_phy(bp);
4777 spin_unlock_bh(&bp->phy_lock);
4778 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4779 rc |= BNX2_MAC_LOOPBACK_FAILED;
4780 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4781 rc |= BNX2_PHY_LOOPBACK_FAILED;
4782 return rc;
4783 }
4784
4785 #define NVRAM_SIZE 0x200
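/* CRC32 over a block that ends with its own little-endian CRC always
 * leaves this constant residual, so each region verifies itself.
 */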
4786 #define CRC32_RESIDUAL 0xdebb20e3
4787
4788 static int
4789 bnx2_test_nvram(struct bnx2 *bp)
4790 {
4791 u32 buf[NVRAM_SIZE / 4];
4792 u8 *data = (u8 *) buf;
4793 int rc = 0;
4794 u32 magic, csum;
4795
4796 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4797 goto test_nvram_done;
4798
4799 magic = be32_to_cpu(buf[0]);
4800 if (magic != 0x669955aa) {
4801 rc = -ENODEV;
4802 goto test_nvram_done;
4803 }
4804
4805 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4806 goto test_nvram_done;
4807
4808 csum = ether_crc_le(0x100, data);
4809 if (csum != CRC32_RESIDUAL) {
4810 rc = -ENODEV;
4811 goto test_nvram_done;
4812 }
4813
4814 csum = ether_crc_le(0x100, data + 0x100);
4815 if (csum != CRC32_RESIDUAL) {
4816 rc = -ENODEV;
4817 }
4818
4819 test_nvram_done:
4820 return rc;
4821 }
4822
4823 static int
4824 bnx2_test_link(struct bnx2 *bp)
4825 {
4826 u32 bmsr;
4827
4828 spin_lock_bh(&bp->phy_lock);
4829 bnx2_enable_bmsr1(bp);
4830 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4831 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4832 bnx2_disable_bmsr1(bp);
4833 spin_unlock_bh(&bp->phy_lock);
4834
4835 if (bmsr & BMSR_LSTATUS) {
4836 return 0;
4837 }
4838 return -ENODEV;
4839 }
4840
4841 static int
4842 bnx2_test_intr(struct bnx2 *bp)
4843 {
4844 int i;
4845 u16 status_idx;
4846
4847 if (!netif_running(bp->dev))
4848 return -ENODEV;
4849
4850 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4851
4852 	/* This register is not touched at run-time, so it is safe to
	 * write it here to force an interrupt. */
4853 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4854 REG_RD(bp, BNX2_HC_COMMAND);
4855
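	/* Wait up to roughly 100 ms (10 x 10 ms) for the forced
	 * interrupt to advance the status index.
	 */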
4856 for (i = 0; i < 10; i++) {
4857 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4858 status_idx) {
4859
4860 break;
4861 }
4862
4863 msleep_interruptible(10);
4864 }
4865 if (i < 10)
4866 return 0;
4867
4868 return -ENODEV;
4869 }
4870
4871 static void
4872 bnx2_5706_serdes_timer(struct bnx2 *bp)
4873 {
4874 spin_lock(&bp->phy_lock);
4875 if (bp->serdes_an_pending)
4876 bp->serdes_an_pending--;
4877 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4878 u32 bmcr;
4879
4880 bp->current_interval = bp->timer_interval;
4881
4882 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4883
4884 if (bmcr & BMCR_ANENABLE) {
4885 u32 phy1, phy2;
4886
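			/* Sample the vendor-specific shadow registers:
			 * signal detect with no incoming CONFIG stream
			 * means the partner is forcing 1 Gb/s, so stop
			 * autonegotiating and match it.
			 */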
4887 bnx2_write_phy(bp, 0x1c, 0x7c00);
4888 bnx2_read_phy(bp, 0x1c, &phy1);
4889
4890 bnx2_write_phy(bp, 0x17, 0x0f01);
4891 bnx2_read_phy(bp, 0x15, &phy2);
4892 bnx2_write_phy(bp, 0x17, 0x0f01);
4893 bnx2_read_phy(bp, 0x15, &phy2);
4894
4895 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4896 !(phy2 & 0x20)) { /* no CONFIG */
4897
4898 bmcr &= ~BMCR_ANENABLE;
4899 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4900 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4901 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4902 }
4903 }
4904 }
4905 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4906 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4907 u32 phy2;
4908
4909 bnx2_write_phy(bp, 0x17, 0x0f01);
4910 bnx2_read_phy(bp, 0x15, &phy2);
4911 if (phy2 & 0x20) {
4912 u32 bmcr;
4913
4914 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4915 bmcr |= BMCR_ANENABLE;
4916 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4917
4918 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4919 }
4920 } else
4921 bp->current_interval = bp->timer_interval;
4922
4923 spin_unlock(&bp->phy_lock);
4924 }
4925
4926 static void
4927 bnx2_5708_serdes_timer(struct bnx2 *bp)
4928 {
4929 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4930 return;
4931
4932 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4933 bp->serdes_an_pending = 0;
4934 return;
4935 }
4936
4937 spin_lock(&bp->phy_lock);
4938 if (bp->serdes_an_pending)
4939 bp->serdes_an_pending--;
4940 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4941 u32 bmcr;
4942
4943 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4944 if (bmcr & BMCR_ANENABLE) {
4945 bnx2_enable_forced_2g5(bp);
4946 bp->current_interval = SERDES_FORCED_TIMEOUT;
4947 } else {
4948 bnx2_disable_forced_2g5(bp);
4949 bp->serdes_an_pending = 2;
4950 bp->current_interval = bp->timer_interval;
4951 }
4952
4953 } else
4954 bp->current_interval = bp->timer_interval;
4955
4956 spin_unlock(&bp->phy_lock);
4957 }
4958
4959 static void
4960 bnx2_timer(unsigned long data)
4961 {
4962 struct bnx2 *bp = (struct bnx2 *) data;
4963
4964 if (!netif_running(bp->dev))
4965 return;
4966
4967 if (atomic_read(&bp->intr_sem) != 0)
4968 goto bnx2_restart_timer;
4969
4970 bnx2_send_heart_beat(bp);
4971
4972 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4973
4974 	/* Work around occasionally corrupted counters by forcing a
	 * statistics update. */
4975 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4976 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4977 BNX2_HC_COMMAND_STATS_NOW);
4978
4979 if (bp->phy_flags & PHY_SERDES_FLAG) {
4980 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4981 bnx2_5706_serdes_timer(bp);
4982 else
4983 bnx2_5708_serdes_timer(bp);
4984 }
4985
4986 bnx2_restart_timer:
4987 mod_timer(&bp->timer, jiffies + bp->current_interval);
4988 }
4989
4990 static int
4991 bnx2_request_irq(struct bnx2 *bp)
4992 {
4993 struct net_device *dev = bp->dev;
4994 int rc = 0;
4995
4996 if (bp->flags & USING_MSI_FLAG) {
4997 irq_handler_t fn = bnx2_msi;
4998
4999 if (bp->flags & ONE_SHOT_MSI_FLAG)
5000 fn = bnx2_msi_1shot;
5001
5002 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5003 } else
5004 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5005 IRQF_SHARED, dev->name, dev);
5006 return rc;
5007 }
5008
5009 static void
5010 bnx2_free_irq(struct bnx2 *bp)
5011 {
5012 struct net_device *dev = bp->dev;
5013
5014 if (bp->flags & USING_MSI_FLAG) {
5015 free_irq(bp->pdev->irq, dev);
5016 pci_disable_msi(bp->pdev);
5017 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5018 } else
5019 free_irq(bp->pdev->irq, dev);
5020 }
5021
5022 /* Called with rtnl_lock */
5023 static int
5024 bnx2_open(struct net_device *dev)
5025 {
5026 struct bnx2 *bp = netdev_priv(dev);
5027 int rc;
5028
5029 netif_carrier_off(dev);
5030
5031 bnx2_set_power_state(bp, PCI_D0);
5032 bnx2_disable_int(bp);
5033
5034 rc = bnx2_alloc_mem(bp);
5035 if (rc)
5036 return rc;
5037
5038 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
5039 if (pci_enable_msi(bp->pdev) == 0) {
5040 bp->flags |= USING_MSI_FLAG;
5041 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5042 bp->flags |= ONE_SHOT_MSI_FLAG;
5043 }
5044 }
5045 rc = bnx2_request_irq(bp);
5046
5047 if (rc) {
5048 bnx2_free_mem(bp);
5049 return rc;
5050 }
5051
5052 rc = bnx2_init_nic(bp);
5053
5054 if (rc) {
5055 bnx2_free_irq(bp);
5056 bnx2_free_skbs(bp);
5057 bnx2_free_mem(bp);
5058 return rc;
5059 }
5060
5061 mod_timer(&bp->timer, jiffies + bp->current_interval);
5062
5063 atomic_set(&bp->intr_sem, 0);
5064
5065 bnx2_enable_int(bp);
5066
5067 if (bp->flags & USING_MSI_FLAG) {
5068 		/* Test MSI to make sure it is working.
5069 		 * If the MSI test fails, fall back to INTx mode.
5070 		 */
5071 if (bnx2_test_intr(bp) != 0) {
5072 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5073 " using MSI, switching to INTx mode. Please"
5074 " report this failure to the PCI maintainer"
5075 " and include system chipset information.\n",
5076 bp->dev->name);
5077
5078 bnx2_disable_int(bp);
5079 bnx2_free_irq(bp);
5080
5081 rc = bnx2_init_nic(bp);
5082
5083 if (!rc)
5084 rc = bnx2_request_irq(bp);
5085
5086 if (rc) {
5087 bnx2_free_skbs(bp);
5088 bnx2_free_mem(bp);
5089 del_timer_sync(&bp->timer);
5090 return rc;
5091 }
5092 bnx2_enable_int(bp);
5093 }
5094 }
5095 if (bp->flags & USING_MSI_FLAG) {
5096 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5097 }
5098
5099 netif_start_queue(dev);
5100
5101 return 0;
5102 }
5103
5104 static void
5105 bnx2_reset_task(struct work_struct *work)
5106 {
5107 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5108
5109 if (!netif_running(bp->dev))
5110 return;
5111
5112 bp->in_reset_task = 1;
5113 bnx2_netif_stop(bp);
5114
5115 bnx2_init_nic(bp);
5116
5117 atomic_set(&bp->intr_sem, 1);
5118 bnx2_netif_start(bp);
5119 bp->in_reset_task = 0;
5120 }
5121
5122 static void
5123 bnx2_tx_timeout(struct net_device *dev)
5124 {
5125 struct bnx2 *bp = netdev_priv(dev);
5126
5127 	/* This allows the netif to be shut down gracefully before resetting. */
5128 schedule_work(&bp->reset_task);
5129 }
5130
5131 #ifdef BCM_VLAN
5132 /* Called with rtnl_lock */
5133 static void
5134 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5135 {
5136 struct bnx2 *bp = netdev_priv(dev);
5137
5138 bnx2_netif_stop(bp);
5139
5140 bp->vlgrp = vlgrp;
5141 bnx2_set_rx_mode(dev);
5142
5143 bnx2_netif_start(bp);
5144 }
5145 #endif
5146
5147 /* Called with netif_tx_lock.
5148 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5149 * netif_wake_queue().
5150 */
5151 static int
5152 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5153 {
5154 struct bnx2 *bp = netdev_priv(dev);
5155 dma_addr_t mapping;
5156 struct tx_bd *txbd;
5157 struct sw_bd *tx_buf;
5158 u32 len, vlan_tag_flags, last_frag, mss;
5159 u16 prod, ring_prod;
5160 int i;
5161
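	/* The queue is stopped whenever too few BDs remain for a
	 * maximally-fragmented skb, so a full ring here is a bug.
	 */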
5162 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5163 netif_stop_queue(dev);
5164 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5165 dev->name);
5166
5167 return NETDEV_TX_BUSY;
5168 }
5169 len = skb_headlen(skb);
5170 prod = bp->tx_prod;
5171 ring_prod = TX_RING_IDX(prod);
5172
5173 vlan_tag_flags = 0;
5174 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5175 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5176 }
5177
5178 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5179 vlan_tag_flags |=
5180 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5181 }
5182 if ((mss = skb_shinfo(skb)->gso_size)) {
5183 u32 tcp_opt_len, ip_tcp_len;
5184 struct iphdr *iph;
5185
5186 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5187
5188 tcp_opt_len = tcp_optlen(skb);
5189
5190 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5191 u32 tcp_off = skb_transport_offset(skb) -
5192 sizeof(struct ipv6hdr) - ETH_HLEN;
5193
5194 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5195 TX_BD_FLAGS_SW_FLAGS;
5196 if (likely(tcp_off == 0))
5197 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5198 else {
5199 tcp_off >>= 3;
5200 vlan_tag_flags |= ((tcp_off & 0x3) <<
5201 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5202 ((tcp_off & 0x10) <<
5203 TX_BD_FLAGS_TCP6_OFF4_SHL);
5204 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5205 }
5206 } else {
5207 if (skb_header_cloned(skb) &&
5208 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5209 dev_kfree_skb(skb);
5210 return NETDEV_TX_OK;
5211 }
5212
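			/* Prime the headers for LSO: tot_len covers a
			 * single segment and tcp->check holds the
			 * pseudo-header sum without the length, which
			 * the chip fills in per segment.
			 */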
5213 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5214
5215 iph = ip_hdr(skb);
5216 iph->check = 0;
5217 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5218 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5219 iph->daddr, 0,
5220 IPPROTO_TCP,
5221 0);
5222 if (tcp_opt_len || (iph->ihl > 5)) {
5223 vlan_tag_flags |= ((iph->ihl - 5) +
5224 (tcp_opt_len >> 2)) << 8;
5225 }
5226 }
5227 } else
5228 mss = 0;
5229
5230 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5231
5232 tx_buf = &bp->tx_buf_ring[ring_prod];
5233 tx_buf->skb = skb;
5234 pci_unmap_addr_set(tx_buf, mapping, mapping);
5235
5236 txbd = &bp->tx_desc_ring[ring_prod];
5237
5238 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5239 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5240 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5241 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5242
5243 last_frag = skb_shinfo(skb)->nr_frags;
5244
5245 for (i = 0; i < last_frag; i++) {
5246 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5247
5248 prod = NEXT_TX_BD(prod);
5249 ring_prod = TX_RING_IDX(prod);
5250 txbd = &bp->tx_desc_ring[ring_prod];
5251
5252 len = frag->size;
5253 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5254 len, PCI_DMA_TODEVICE);
5255 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5256 mapping, mapping);
5257
5258 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5259 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5260 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5261 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5262
5263 }
5264 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5265
5266 prod = NEXT_TX_BD(prod);
5267 bp->tx_prod_bseq += skb->len;
5268
5269 REG_WR16(bp, bp->tx_bidx_addr, prod);
5270 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5271
5272 mmiowb();
5273
5274 bp->tx_prod = prod;
5275 dev->trans_start = jiffies;
5276
5277 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5278 netif_stop_queue(dev);
5279 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5280 netif_wake_queue(dev);
5281 }
5282
5283 return NETDEV_TX_OK;
5284 }
5285
5286 /* Called with rtnl_lock */
5287 static int
5288 bnx2_close(struct net_device *dev)
5289 {
5290 struct bnx2 *bp = netdev_priv(dev);
5291 u32 reset_code;
5292
5293 	/* Calling flush_scheduled_work() may deadlock because
5294 	 * linkwatch_event() may be on the workqueue and it will try to get
5295 	 * the rtnl_lock, which we are holding, so poll until any pending
5296 	 * reset task finishes instead. */
5297 while (bp->in_reset_task)
5298 msleep(1);
5299
5300 bnx2_netif_stop(bp);
5301 del_timer_sync(&bp->timer);
5302 if (bp->flags & NO_WOL_FLAG)
5303 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5304 else if (bp->wol)
5305 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5306 else
5307 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5308 bnx2_reset_chip(bp, reset_code);
5309 bnx2_free_irq(bp);
5310 bnx2_free_skbs(bp);
5311 bnx2_free_mem(bp);
5312 bp->link_up = 0;
5313 netif_carrier_off(bp->dev);
5314 bnx2_set_power_state(bp, PCI_D3hot);
5315 return 0;
5316 }
5317
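/* struct net_device_stats uses unsigned long, so the 64-bit hardware
 * counters can only be returned whole on 64-bit hosts; 32-bit hosts
 * get the low 32 bits.
 */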
5318 #define GET_NET_STATS64(ctr) \
5319 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5320 (unsigned long) (ctr##_lo)
5321
5322 #define GET_NET_STATS32(ctr) \
5323 (ctr##_lo)
5324
5325 #if (BITS_PER_LONG == 64)
5326 #define GET_NET_STATS GET_NET_STATS64
5327 #else
5328 #define GET_NET_STATS GET_NET_STATS32
5329 #endif
5330
5331 static struct net_device_stats *
5332 bnx2_get_stats(struct net_device *dev)
5333 {
5334 struct bnx2 *bp = netdev_priv(dev);
5335 struct statistics_block *stats_blk = bp->stats_blk;
5336 struct net_device_stats *net_stats = &bp->net_stats;
5337
5338 if (bp->stats_blk == NULL) {
5339 return net_stats;
5340 }
5341 net_stats->rx_packets =
5342 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5343 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5344 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5345
5346 net_stats->tx_packets =
5347 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5348 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5349 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5350
5351 net_stats->rx_bytes =
5352 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5353
5354 net_stats->tx_bytes =
5355 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5356
5357 net_stats->multicast =
5358 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5359
5360 net_stats->collisions =
5361 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5362
5363 net_stats->rx_length_errors =
5364 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5365 stats_blk->stat_EtherStatsOverrsizePkts);
5366
5367 net_stats->rx_over_errors =
5368 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5369
5370 net_stats->rx_frame_errors =
5371 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5372
5373 net_stats->rx_crc_errors =
5374 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5375
5376 net_stats->rx_errors = net_stats->rx_length_errors +
5377 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5378 net_stats->rx_crc_errors;
5379
5380 net_stats->tx_aborted_errors =
5381 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5382 stats_blk->stat_Dot3StatsLateCollisions);
5383
5384 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5385 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5386 net_stats->tx_carrier_errors = 0;
5387 else {
5388 net_stats->tx_carrier_errors =
5389 (unsigned long)
5390 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5391 }
5392
5393 net_stats->tx_errors =
5394 (unsigned long)
5395 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5396 +
5397 net_stats->tx_aborted_errors +
5398 net_stats->tx_carrier_errors;
5399
5400 net_stats->rx_missed_errors =
5401 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5402 stats_blk->stat_FwRxDrop);
5403
5404 return net_stats;
5405 }
5406
5407 /* All ethtool functions called with rtnl_lock */
5408
5409 static int
5410 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5411 {
5412 struct bnx2 *bp = netdev_priv(dev);
5413 int support_serdes = 0, support_copper = 0;
5414
5415 cmd->supported = SUPPORTED_Autoneg;
5416 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5417 support_serdes = 1;
5418 support_copper = 1;
5419 } else if (bp->phy_port == PORT_FIBRE)
5420 support_serdes = 1;
5421 else
5422 support_copper = 1;
5423
5424 if (support_serdes) {
5425 cmd->supported |= SUPPORTED_1000baseT_Full |
5426 SUPPORTED_FIBRE;
5427 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5428 cmd->supported |= SUPPORTED_2500baseX_Full;
5429
5430 }
5431 if (support_copper) {
5432 cmd->supported |= SUPPORTED_10baseT_Half |
5433 SUPPORTED_10baseT_Full |
5434 SUPPORTED_100baseT_Half |
5435 SUPPORTED_100baseT_Full |
5436 SUPPORTED_1000baseT_Full |
5437 SUPPORTED_TP;
5438
5439 }
5440
5441 spin_lock_bh(&bp->phy_lock);
5442 cmd->port = bp->phy_port;
5443 cmd->advertising = bp->advertising;
5444
5445 if (bp->autoneg & AUTONEG_SPEED) {
5446 cmd->autoneg = AUTONEG_ENABLE;
5447 }
5448 else {
5449 cmd->autoneg = AUTONEG_DISABLE;
5450 }
5451
5452 if (netif_carrier_ok(dev)) {
5453 cmd->speed = bp->line_speed;
5454 cmd->duplex = bp->duplex;
5455 }
5456 else {
5457 cmd->speed = -1;
5458 cmd->duplex = -1;
5459 }
5460 spin_unlock_bh(&bp->phy_lock);
5461
5462 cmd->transceiver = XCVR_INTERNAL;
5463 cmd->phy_address = bp->phy_addr;
5464
5465 return 0;
5466 }
5467
5468 static int
5469 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5470 {
5471 struct bnx2 *bp = netdev_priv(dev);
5472 u8 autoneg = bp->autoneg;
5473 u8 req_duplex = bp->req_duplex;
5474 u16 req_line_speed = bp->req_line_speed;
5475 u32 advertising = bp->advertising;
5476 int err = -EINVAL;
5477
5478 spin_lock_bh(&bp->phy_lock);
5479
5480 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5481 goto err_out_unlock;
5482
5483 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5484 goto err_out_unlock;
5485
5486 if (cmd->autoneg == AUTONEG_ENABLE) {
5487 autoneg |= AUTONEG_SPEED;
5488
5489 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5490
5491 		/* allow advertising exactly one speed */
5492 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5493 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5494 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5495 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5496
5497 if (cmd->port == PORT_FIBRE)
5498 goto err_out_unlock;
5499
5500 advertising = cmd->advertising;
5501
5502 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5503 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5504 (cmd->port == PORT_TP))
5505 goto err_out_unlock;
5506 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5507 advertising = cmd->advertising;
5508 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5509 goto err_out_unlock;
5510 else {
5511 if (cmd->port == PORT_FIBRE)
5512 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5513 else
5514 advertising = ETHTOOL_ALL_COPPER_SPEED;
5515 }
5516 advertising |= ADVERTISED_Autoneg;
5517 }
5518 else {
5519 if (cmd->port == PORT_FIBRE) {
5520 if ((cmd->speed != SPEED_1000 &&
5521 cmd->speed != SPEED_2500) ||
5522 (cmd->duplex != DUPLEX_FULL))
5523 goto err_out_unlock;
5524
5525 if (cmd->speed == SPEED_2500 &&
5526 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5527 goto err_out_unlock;
5528 }
5529 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5530 goto err_out_unlock;
5531
5532 autoneg &= ~AUTONEG_SPEED;
5533 req_line_speed = cmd->speed;
5534 req_duplex = cmd->duplex;
5535 advertising = 0;
5536 }
5537
5538 bp->autoneg = autoneg;
5539 bp->advertising = advertising;
5540 bp->req_line_speed = req_line_speed;
5541 bp->req_duplex = req_duplex;
5542
5543 err = bnx2_setup_phy(bp, cmd->port);
5544
5545 err_out_unlock:
5546 spin_unlock_bh(&bp->phy_lock);
5547
5548 return err;
5549 }
5550
5551 static void
5552 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5553 {
5554 struct bnx2 *bp = netdev_priv(dev);
5555
5556 strcpy(info->driver, DRV_MODULE_NAME);
5557 strcpy(info->version, DRV_MODULE_VERSION);
5558 strcpy(info->bus_info, pci_name(bp->pdev));
5559 strcpy(info->fw_version, bp->fw_version);
5560 }
5561
5562 #define BNX2_REGDUMP_LEN (32 * 1024)
5563
5564 static int
5565 bnx2_get_regs_len(struct net_device *dev)
5566 {
5567 return BNX2_REGDUMP_LEN;
5568 }
5569
5570 static void
5571 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5572 {
5573 u32 *p = _p, i, offset;
5574 u8 *orig_p = _p;
5575 struct bnx2 *bp = netdev_priv(dev);
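	/* Consecutive pairs give [start, end) ranges of registers that
	 * are safe to read; the holes between ranges are left zeroed in
	 * the dump.
	 */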
5576 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5577 0x0800, 0x0880, 0x0c00, 0x0c10,
5578 0x0c30, 0x0d08, 0x1000, 0x101c,
5579 0x1040, 0x1048, 0x1080, 0x10a4,
5580 0x1400, 0x1490, 0x1498, 0x14f0,
5581 0x1500, 0x155c, 0x1580, 0x15dc,
5582 0x1600, 0x1658, 0x1680, 0x16d8,
5583 0x1800, 0x1820, 0x1840, 0x1854,
5584 0x1880, 0x1894, 0x1900, 0x1984,
5585 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5586 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5587 0x2000, 0x2030, 0x23c0, 0x2400,
5588 0x2800, 0x2820, 0x2830, 0x2850,
5589 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5590 0x3c00, 0x3c94, 0x4000, 0x4010,
5591 0x4080, 0x4090, 0x43c0, 0x4458,
5592 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5593 0x4fc0, 0x5010, 0x53c0, 0x5444,
5594 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5595 0x5fc0, 0x6000, 0x6400, 0x6428,
5596 0x6800, 0x6848, 0x684c, 0x6860,
5597 0x6888, 0x6910, 0x8000 };
5598
5599 regs->version = 0;
5600
5601 memset(p, 0, BNX2_REGDUMP_LEN);
5602
5603 if (!netif_running(bp->dev))
5604 return;
5605
5606 i = 0;
5607 offset = reg_boundaries[0];
5608 p += offset;
5609 while (offset < BNX2_REGDUMP_LEN) {
5610 *p++ = REG_RD(bp, offset);
5611 offset += 4;
5612 if (offset == reg_boundaries[i + 1]) {
5613 offset = reg_boundaries[i + 2];
5614 p = (u32 *) (orig_p + offset);
5615 i += 2;
5616 }
5617 }
5618 }
5619
5620 static void
5621 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5622 {
5623 struct bnx2 *bp = netdev_priv(dev);
5624
5625 if (bp->flags & NO_WOL_FLAG) {
5626 wol->supported = 0;
5627 wol->wolopts = 0;
5628 }
5629 else {
5630 wol->supported = WAKE_MAGIC;
5631 if (bp->wol)
5632 wol->wolopts = WAKE_MAGIC;
5633 else
5634 wol->wolopts = 0;
5635 }
5636 memset(&wol->sopass, 0, sizeof(wol->sopass));
5637 }
5638
5639 static int
5640 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5641 {
5642 struct bnx2 *bp = netdev_priv(dev);
5643
5644 if (wol->wolopts & ~WAKE_MAGIC)
5645 return -EINVAL;
5646
5647 if (wol->wolopts & WAKE_MAGIC) {
5648 if (bp->flags & NO_WOL_FLAG)
5649 return -EINVAL;
5650
5651 bp->wol = 1;
5652 }
5653 else {
5654 bp->wol = 0;
5655 }
5656 return 0;
5657 }
5658
5659 static int
5660 bnx2_nway_reset(struct net_device *dev)
5661 {
5662 struct bnx2 *bp = netdev_priv(dev);
5663 u32 bmcr;
5664
5665 if (!(bp->autoneg & AUTONEG_SPEED)) {
5666 return -EINVAL;
5667 }
5668
5669 spin_lock_bh(&bp->phy_lock);
5670
5671 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5672 int rc;
5673
5674 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5675 spin_unlock_bh(&bp->phy_lock);
5676 return rc;
5677 }
5678
5679 	/* Force a link down that is visible to the link partner. */
5680 if (bp->phy_flags & PHY_SERDES_FLAG) {
5681 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5682 spin_unlock_bh(&bp->phy_lock);
5683
5684 msleep(20);
5685
5686 spin_lock_bh(&bp->phy_lock);
5687
5688 bp->current_interval = SERDES_AN_TIMEOUT;
5689 bp->serdes_an_pending = 1;
5690 mod_timer(&bp->timer, jiffies + bp->current_interval);
5691 }
5692
5693 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5694 bmcr &= ~BMCR_LOOPBACK;
5695 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5696
5697 spin_unlock_bh(&bp->phy_lock);
5698
5699 return 0;
5700 }
5701
5702 static int
5703 bnx2_get_eeprom_len(struct net_device *dev)
5704 {
5705 struct bnx2 *bp = netdev_priv(dev);
5706
5707 if (bp->flash_info == NULL)
5708 return 0;
5709
5710 return (int) bp->flash_size;
5711 }
5712
5713 static int
5714 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5715 u8 *eebuf)
5716 {
5717 struct bnx2 *bp = netdev_priv(dev);
5718 int rc;
5719
5720 /* parameters already validated in ethtool_get_eeprom */
5721
5722 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5723
5724 return rc;
5725 }
5726
5727 static int
5728 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5729 u8 *eebuf)
5730 {
5731 struct bnx2 *bp = netdev_priv(dev);
5732 int rc;
5733
5734 /* parameters already validated in ethtool_set_eeprom */
5735
5736 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5737
5738 return rc;
5739 }
5740
5741 static int
5742 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5743 {
5744 struct bnx2 *bp = netdev_priv(dev);
5745
5746 memset(coal, 0, sizeof(struct ethtool_coalesce));
5747
5748 coal->rx_coalesce_usecs = bp->rx_ticks;
5749 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5750 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5751 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5752
5753 coal->tx_coalesce_usecs = bp->tx_ticks;
5754 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5755 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5756 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5757
5758 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5759
5760 return 0;
5761 }
5762
5763 static int
5764 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5765 {
5766 struct bnx2 *bp = netdev_priv(dev);
5767
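	/* Clamp every value to the width of its host-coalescing field:
	 * tick values are 10 bits wide, frame-count trips are 8 bits.
	 */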
5768 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5769 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5770
5771 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5772 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5773
5774 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5775 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5776
5777 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5778 if (bp->rx_quick_cons_trip_int > 0xff)
5779 bp->rx_quick_cons_trip_int = 0xff;
5780
5781 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5782 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5783
5784 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5785 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5786
5787 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5788 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5789
5790 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5791 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5792 0xff;
5793
5794 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5795 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5796 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5797 bp->stats_ticks = USEC_PER_SEC;
5798 }
5799 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5800 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5801 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5802
5803 if (netif_running(bp->dev)) {
5804 bnx2_netif_stop(bp);
5805 bnx2_init_nic(bp);
5806 bnx2_netif_start(bp);
5807 }
5808
5809 return 0;
5810 }
5811
5812 static void
5813 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5814 {
5815 struct bnx2 *bp = netdev_priv(dev);
5816
5817 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5818 ering->rx_mini_max_pending = 0;
5819 ering->rx_jumbo_max_pending = 0;
5820
5821 ering->rx_pending = bp->rx_ring_size;
5822 ering->rx_mini_pending = 0;
5823 ering->rx_jumbo_pending = 0;
5824
5825 ering->tx_max_pending = MAX_TX_DESC_CNT;
5826 ering->tx_pending = bp->tx_ring_size;
5827 }
5828
5829 static int
5830 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5831 {
5832 struct bnx2 *bp = netdev_priv(dev);
5833
5834 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5835 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5836 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5837
5838 return -EINVAL;
5839 }
5840 if (netif_running(bp->dev)) {
5841 bnx2_netif_stop(bp);
5842 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5843 bnx2_free_skbs(bp);
5844 bnx2_free_mem(bp);
5845 }
5846
5847 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5848 bp->tx_ring_size = ering->tx_pending;
5849
5850 if (netif_running(bp->dev)) {
5851 int rc;
5852
5853 rc = bnx2_alloc_mem(bp);
5854 if (rc)
5855 return rc;
5856 bnx2_init_nic(bp);
5857 bnx2_netif_start(bp);
5858 }
5859
5860 return 0;
5861 }
5862
5863 static void
5864 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5865 {
5866 struct bnx2 *bp = netdev_priv(dev);
5867
5868 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5869 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5870 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5871 }
5872
5873 static int
5874 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5875 {
5876 struct bnx2 *bp = netdev_priv(dev);
5877
5878 bp->req_flow_ctrl = 0;
5879 if (epause->rx_pause)
5880 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5881 if (epause->tx_pause)
5882 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5883
5884 if (epause->autoneg) {
5885 bp->autoneg |= AUTONEG_FLOW_CTRL;
5886 }
5887 else {
5888 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5889 }
5890
5891 spin_lock_bh(&bp->phy_lock);
5892
5893 bnx2_setup_phy(bp, bp->phy_port);
5894
5895 spin_unlock_bh(&bp->phy_lock);
5896
5897 return 0;
5898 }
5899
5900 static u32
5901 bnx2_get_rx_csum(struct net_device *dev)
5902 {
5903 struct bnx2 *bp = netdev_priv(dev);
5904
5905 return bp->rx_csum;
5906 }
5907
5908 static int
5909 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5910 {
5911 struct bnx2 *bp = netdev_priv(dev);
5912
5913 bp->rx_csum = data;
5914 return 0;
5915 }
5916
5917 static int
5918 bnx2_set_tso(struct net_device *dev, u32 data)
5919 {
5920 struct bnx2 *bp = netdev_priv(dev);
5921
5922 if (data) {
5923 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5924 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5925 dev->features |= NETIF_F_TSO6;
5926 } else
5927 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5928 NETIF_F_TSO_ECN);
5929 return 0;
5930 }
5931
5932 #define BNX2_NUM_STATS 46
5933
5934 static struct {
5935 char string[ETH_GSTRING_LEN];
5936 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5937 { "rx_bytes" },
5938 { "rx_error_bytes" },
5939 { "tx_bytes" },
5940 { "tx_error_bytes" },
5941 { "rx_ucast_packets" },
5942 { "rx_mcast_packets" },
5943 { "rx_bcast_packets" },
5944 { "tx_ucast_packets" },
5945 { "tx_mcast_packets" },
5946 { "tx_bcast_packets" },
5947 { "tx_mac_errors" },
5948 { "tx_carrier_errors" },
5949 { "rx_crc_errors" },
5950 { "rx_align_errors" },
5951 { "tx_single_collisions" },
5952 { "tx_multi_collisions" },
5953 { "tx_deferred" },
5954 { "tx_excess_collisions" },
5955 { "tx_late_collisions" },
5956 { "tx_total_collisions" },
5957 { "rx_fragments" },
5958 { "rx_jabbers" },
5959 { "rx_undersize_packets" },
5960 { "rx_oversize_packets" },
5961 { "rx_64_byte_packets" },
5962 { "rx_65_to_127_byte_packets" },
5963 { "rx_128_to_255_byte_packets" },
5964 { "rx_256_to_511_byte_packets" },
5965 { "rx_512_to_1023_byte_packets" },
5966 { "rx_1024_to_1522_byte_packets" },
5967 { "rx_1523_to_9022_byte_packets" },
5968 { "tx_64_byte_packets" },
5969 { "tx_65_to_127_byte_packets" },
5970 { "tx_128_to_255_byte_packets" },
5971 { "tx_256_to_511_byte_packets" },
5972 { "tx_512_to_1023_byte_packets" },
5973 { "tx_1024_to_1522_byte_packets" },
5974 { "tx_1523_to_9022_byte_packets" },
5975 { "rx_xon_frames" },
5976 { "rx_xoff_frames" },
5977 { "tx_xon_frames" },
5978 { "tx_xoff_frames" },
5979 { "rx_mac_ctrl_frames" },
5980 { "rx_filtered_packets" },
5981 { "rx_discards" },
5982 { "rx_fw_discards" },
5983 };
5984
5985 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5986
5987 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5988 STATS_OFFSET32(stat_IfHCInOctets_hi),
5989 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5990 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5991 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5992 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5993 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5994 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5995 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5996 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5997 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5998 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5999 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6000 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6001 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6002 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6003 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6004 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6005 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6006 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6007 STATS_OFFSET32(stat_EtherStatsCollisions),
6008 STATS_OFFSET32(stat_EtherStatsFragments),
6009 STATS_OFFSET32(stat_EtherStatsJabbers),
6010 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6011 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6012 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6013 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6014 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6015 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6016 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6017 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6018 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6019 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6020 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6021 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6022 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6023 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6024 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6025 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6026 STATS_OFFSET32(stat_XonPauseFramesReceived),
6027 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6028 STATS_OFFSET32(stat_OutXonSent),
6029 STATS_OFFSET32(stat_OutXoffSent),
6030 STATS_OFFSET32(stat_MacControlFramesReceived),
6031 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6032 STATS_OFFSET32(stat_IfInMBUFDiscards),
6033 STATS_OFFSET32(stat_FwRxDrop),
6034 };
6035
6036 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6037 * skipped because of errata.
6038 */
6039 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6040 8,0,8,8,8,8,8,8,8,8,
6041 4,0,4,4,4,4,4,4,4,4,
6042 4,4,4,4,4,4,4,4,4,4,
6043 4,4,4,4,4,4,4,4,4,4,
6044 4,4,4,4,4,4,
6045 };
6046
6047 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6048 8,0,8,8,8,8,8,8,8,8,
6049 4,4,4,4,4,4,4,4,4,4,
6050 4,4,4,4,4,4,4,4,4,4,
6051 4,4,4,4,4,4,4,4,4,4,
6052 4,4,4,4,4,4,
6053 };
6054
6055 #define BNX2_NUM_TESTS 6
6056
6057 static struct {
6058 char string[ETH_GSTRING_LEN];
6059 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6060 { "register_test (offline)" },
6061 { "memory_test (offline)" },
6062 { "loopback_test (offline)" },
6063 { "nvram_test (online)" },
6064 { "interrupt_test (online)" },
6065 { "link_test (online)" },
6066 };
6067
6068 static int
6069 bnx2_self_test_count(struct net_device *dev)
6070 {
6071 return BNX2_NUM_TESTS;
6072 }
6073
6074 static void
6075 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6076 {
6077 struct bnx2 *bp = netdev_priv(dev);
6078
6079 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6080 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6081 int i;
6082
6083 bnx2_netif_stop(bp);
6084 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6085 bnx2_free_skbs(bp);
6086
6087 if (bnx2_test_registers(bp) != 0) {
6088 buf[0] = 1;
6089 etest->flags |= ETH_TEST_FL_FAILED;
6090 }
6091 if (bnx2_test_memory(bp) != 0) {
6092 buf[1] = 1;
6093 etest->flags |= ETH_TEST_FL_FAILED;
6094 }
6095 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6096 etest->flags |= ETH_TEST_FL_FAILED;
6097
6098 if (!netif_running(bp->dev)) {
6099 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6100 }
6101 else {
6102 bnx2_init_nic(bp);
6103 bnx2_netif_start(bp);
6104 }
6105
6106 /* wait for link up */
6107 for (i = 0; i < 7; i++) {
6108 if (bp->link_up)
6109 break;
6110 msleep_interruptible(1000);
6111 }
6112 }
6113
6114 if (bnx2_test_nvram(bp) != 0) {
6115 buf[3] = 1;
6116 etest->flags |= ETH_TEST_FL_FAILED;
6117 }
6118 if (bnx2_test_intr(bp) != 0) {
6119 buf[4] = 1;
6120 etest->flags |= ETH_TEST_FL_FAILED;
6121 }
6122
6123 if (bnx2_test_link(bp) != 0) {
6124 buf[5] = 1;
6125 etest->flags |= ETH_TEST_FL_FAILED;
6126
6127 }
6128 }
6129
6130 static void
6131 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6132 {
6133 switch (stringset) {
6134 case ETH_SS_STATS:
6135 memcpy(buf, bnx2_stats_str_arr,
6136 sizeof(bnx2_stats_str_arr));
6137 break;
6138 case ETH_SS_TEST:
6139 memcpy(buf, bnx2_tests_str_arr,
6140 sizeof(bnx2_tests_str_arr));
6141 break;
6142 }
6143 }
6144
6145 static int
6146 bnx2_get_stats_count(struct net_device *dev)
6147 {
6148 return BNX2_NUM_STATS;
6149 }
6150
6151 static void
6152 bnx2_get_ethtool_stats(struct net_device *dev,
6153 struct ethtool_stats *stats, u64 *buf)
6154 {
6155 struct bnx2 *bp = netdev_priv(dev);
6156 int i;
6157 u32 *hw_stats = (u32 *) bp->stats_blk;
6158 u8 *stats_len_arr = NULL;
6159
6160 if (hw_stats == NULL) {
6161 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6162 return;
6163 }
6164
6165 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6166 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6167 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6168 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6169 stats_len_arr = bnx2_5706_stats_len_arr;
6170 else
6171 stats_len_arr = bnx2_5708_stats_len_arr;
6172
6173 for (i = 0; i < BNX2_NUM_STATS; i++) {
6174 if (stats_len_arr[i] == 0) {
6175 /* skip this counter */
6176 buf[i] = 0;
6177 continue;
6178 }
6179 if (stats_len_arr[i] == 4) {
6180 /* 4-byte counter */
6181 buf[i] = (u64)
6182 *(hw_stats + bnx2_stats_offset_arr[i]);
6183 continue;
6184 }
6185 /* 8-byte counter */
6186 buf[i] = (((u64) *(hw_stats +
6187 bnx2_stats_offset_arr[i])) << 32) +
6188 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6189 }
6190 }
6191
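/* Blink the port LED to identify the adapter ("ethtool -p eth0 <secs>").
 * data is the requested duration in seconds (0 means a 2-second
 * default); each second drives the LED through one on/off cycle via
 * the EMAC LED override bits, and the original MISC_CFG LED mode is
 * restored afterwards.
 */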
6192 static int
6193 bnx2_phys_id(struct net_device *dev, u32 data)
6194 {
6195 struct bnx2 *bp = netdev_priv(dev);
6196 int i;
6197 u32 save;
6198
6199 if (data == 0)
6200 data = 2;
6201
6202 save = REG_RD(bp, BNX2_MISC_CFG);
6203 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6204
6205 for (i = 0; i < (data * 2); i++) {
6206 if ((i % 2) == 0) {
6207 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6208 } else {
6210 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6211 BNX2_EMAC_LED_1000MB_OVERRIDE |
6212 BNX2_EMAC_LED_100MB_OVERRIDE |
6213 BNX2_EMAC_LED_10MB_OVERRIDE |
6214 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6215 BNX2_EMAC_LED_TRAFFIC);
6216 }
6217 msleep_interruptible(500);
6218 if (signal_pending(current))
6219 break;
6220 }
6221 REG_WR(bp, BNX2_EMAC_LED, 0);
6222 REG_WR(bp, BNX2_MISC_CFG, save);
6223 return 0;
6224 }
6225
6226 static int
6227 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6228 {
6229 struct bnx2 *bp = netdev_priv(dev);
6230
6231 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6232 return ethtool_op_set_tx_ipv6_csum(dev, data);
6233 else
6234 return ethtool_op_set_tx_csum(dev, data);
6235 }
6236
6237 static const struct ethtool_ops bnx2_ethtool_ops = {
6238 .get_settings = bnx2_get_settings,
6239 .set_settings = bnx2_set_settings,
6240 .get_drvinfo = bnx2_get_drvinfo,
6241 .get_regs_len = bnx2_get_regs_len,
6242 .get_regs = bnx2_get_regs,
6243 .get_wol = bnx2_get_wol,
6244 .set_wol = bnx2_set_wol,
6245 .nway_reset = bnx2_nway_reset,
6246 .get_link = ethtool_op_get_link,
6247 .get_eeprom_len = bnx2_get_eeprom_len,
6248 .get_eeprom = bnx2_get_eeprom,
6249 .set_eeprom = bnx2_set_eeprom,
6250 .get_coalesce = bnx2_get_coalesce,
6251 .set_coalesce = bnx2_set_coalesce,
6252 .get_ringparam = bnx2_get_ringparam,
6253 .set_ringparam = bnx2_set_ringparam,
6254 .get_pauseparam = bnx2_get_pauseparam,
6255 .set_pauseparam = bnx2_set_pauseparam,
6256 .get_rx_csum = bnx2_get_rx_csum,
6257 .set_rx_csum = bnx2_set_rx_csum,
6258 .get_tx_csum = ethtool_op_get_tx_csum,
6259 .set_tx_csum = bnx2_set_tx_csum,
6260 .get_sg = ethtool_op_get_sg,
6261 .set_sg = ethtool_op_set_sg,
6262 .get_tso = ethtool_op_get_tso,
6263 .set_tso = bnx2_set_tso,
6264 .self_test_count = bnx2_self_test_count,
6265 .self_test = bnx2_self_test,
6266 .get_strings = bnx2_get_strings,
6267 .phys_id = bnx2_phys_id,
6268 .get_stats_count = bnx2_get_stats_count,
6269 .get_ethtool_stats = bnx2_get_ethtool_stats,
6270 };
6271
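/* MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) give user space raw
 * PHY access, e.g. for mii-tool. Roughly, a caller does (sketch only,
 * error handling omitted):
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	ioctl(sock, SIOCGMIIPHY, &ifr);   // fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(sock, SIOCGMIIREG, &ifr);   // result in mii->val_out
 *
 * Access is refused when the PHY is owned by remote management
 * firmware (REMOTE_PHY_CAP_FLAG), and phy_lock serializes MDIO cycles
 * against the rest of the driver.
 */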
6272 /* Called with rtnl_lock */
6273 static int
6274 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6275 {
6276 struct mii_ioctl_data *data = if_mii(ifr);
6277 struct bnx2 *bp = netdev_priv(dev);
6278 int err;
6279
6280 switch (cmd) {
6281 case SIOCGMIIPHY:
6282 data->phy_id = bp->phy_addr;
6283
6284 /* fallthru */
6285 case SIOCGMIIREG: {
6286 u32 mii_regval;
6287
6288 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6289 return -EOPNOTSUPP;
6290
6291 if (!netif_running(dev))
6292 return -EAGAIN;
6293
6294 spin_lock_bh(&bp->phy_lock);
6295 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6296 spin_unlock_bh(&bp->phy_lock);
6297
6298 data->val_out = mii_regval;
6299
6300 return err;
6301 }
6302
6303 case SIOCSMIIREG:
6304 if (!capable(CAP_NET_ADMIN))
6305 return -EPERM;
6306
6307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6308 return -EOPNOTSUPP;
6309
6310 if (!netif_running(dev))
6311 return -EAGAIN;
6312
6313 spin_lock_bh(&bp->phy_lock);
6314 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6315 spin_unlock_bh(&bp->phy_lock);
6316
6317 return err;
6318
6319 default:
6320 /* do nothing */
6321 break;
6322 }
6323 return -EOPNOTSUPP;
6324 }
6325
6326 /* Called with rtnl_lock */
6327 static int
6328 bnx2_change_mac_addr(struct net_device *dev, void *p)
6329 {
6330 struct sockaddr *addr = p;
6331 struct bnx2 *bp = netdev_priv(dev);
6332
6333 if (!is_valid_ether_addr(addr->sa_data))
6334 return -EINVAL;
6335
6336 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6337 if (netif_running(dev))
6338 bnx2_set_mac_addr(bp);
6339
6340 return 0;
6341 }
6342
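/* Changing the MTU requires a full NIC reinitialization when the
 * interface is up, since receive buffer sizing depends on the MTU.
 */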
6343 /* Called with rtnl_lock */
6344 static int
6345 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6346 {
6347 struct bnx2 *bp = netdev_priv(dev);
6348
6349 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6350 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6351 return -EINVAL;
6352
6353 dev->mtu = new_mtu;
6354 if (netif_running(dev)) {
6355 bnx2_netif_stop(bp);
6356
6357 bnx2_init_nic(bp);
6358
6359 bnx2_netif_start(bp);
6360 }
6361 return 0;
6362 }
6363
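/* netpoll hook (netconsole, kgdb-over-ethernet): emulate an interrupt
 * with the IRQ line disabled so packets can be serviced from contexts
 * where normal interrupt delivery is unavailable.
 */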
6364 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6365 static void
6366 poll_bnx2(struct net_device *dev)
6367 {
6368 struct bnx2 *bp = netdev_priv(dev);
6369
6370 disable_irq(bp->pdev->irq);
6371 bnx2_interrupt(bp->pdev->irq, dev);
6372 enable_irq(bp->pdev->irq);
6373 }
6374 #endif
6375
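/* The 5709 is a dual-media device. The bond ID pins identify pure
 * copper ("C") and pure SerDes ("S") parts directly; otherwise the
 * media for each PCI function is decoded from the strap bits, with
 * separate strap-to-SerDes mappings for function 0 and the other
 * function.
 */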
6376 static void __devinit
6377 bnx2_get_5709_media(struct bnx2 *bp)
6378 {
6379 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6380 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6381 u32 strap;
6382
6383 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6384 return;
6385 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6386 bp->phy_flags |= PHY_SERDES_FLAG;
6387 return;
6388 }
6389
6390 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6391 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6392 else
6393 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6394
6395 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6396 switch (strap) {
6397 case 0x4:
6398 case 0x5:
6399 case 0x6:
6400 bp->phy_flags |= PHY_SERDES_FLAG;
6401 return;
6402 }
6403 } else {
6404 switch (strap) {
6405 case 0x1:
6406 case 0x2:
6407 case 0x4:
6408 bp->phy_flags |= PHY_SERDES_FLAG;
6409 return;
6410 }
6411 }
6412 }
6413
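/* Record the bus type and clock from the hardware-detected status
 * bits: PCI-X detection selects among the decoded clock-speed ranges,
 * while plain PCI is either 66 or 33 MHz based on M66EN.
 */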
6414 static void __devinit
6415 bnx2_get_pci_speed(struct bnx2 *bp)
6416 {
6417 u32 reg;
6418
6419 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6420 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6421 u32 clkreg;
6422
6423 bp->flags |= PCIX_FLAG;
6424
6425 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6426
6427 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6428 switch (clkreg) {
6429 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6430 bp->bus_speed_mhz = 133;
6431 break;
6432
6433 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6434 bp->bus_speed_mhz = 100;
6435 break;
6436
6437 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6438 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6439 bp->bus_speed_mhz = 66;
6440 break;
6441
6442 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6443 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6444 bp->bus_speed_mhz = 50;
6445 break;
6446
6447 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6448 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6449 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6450 bp->bus_speed_mhz = 33;
6451 break;
6452 }
6453 }
6454 else {
6455 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6456 bp->bus_speed_mhz = 66;
6457 else
6458 bp->bus_speed_mhz = 33;
6459 }
6460
6461 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6462 bp->flags |= PCI_32BIT_FLAG;
6464 }
6465
6466 static int __devinit
6467 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6468 {
6469 struct bnx2 *bp;
6470 unsigned long mem_len;
6471 int rc, i, j;
6472 u32 reg;
6473 u64 dma_mask, persist_dma_mask;
6474
6475 SET_MODULE_OWNER(dev);
6476 SET_NETDEV_DEV(dev, &pdev->dev);
6477 bp = netdev_priv(dev);
6478
6479 bp->flags = 0;
6480 bp->phy_flags = 0;
6481
6482 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6483 rc = pci_enable_device(pdev);
6484 if (rc) {
6485 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6486 goto err_out;
6487 }
6488
6489 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6490 dev_err(&pdev->dev,
6491 "Cannot find PCI device base address, aborting.\n");
6492 rc = -ENODEV;
6493 goto err_out_disable;
6494 }
6495
6496 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6497 if (rc) {
6498 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6499 goto err_out_disable;
6500 }
6501
6502 pci_set_master(pdev);
6503
6504 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6505 if (bp->pm_cap == 0) {
6506 dev_err(&pdev->dev,
6507 "Cannot find power management capability, aborting.\n");
6508 rc = -EIO;
6509 goto err_out_release;
6510 }
6511
6512 bp->dev = dev;
6513 bp->pdev = pdev;
6514
6515 spin_lock_init(&bp->phy_lock);
6516 spin_lock_init(&bp->indirect_lock);
6517 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6518
6519 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6520 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6521 dev->mem_end = dev->mem_start + mem_len;
6522 dev->irq = pdev->irq;
6523
6524 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6525
6526 if (!bp->regview) {
6527 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6528 rc = -ENOMEM;
6529 goto err_out_release;
6530 }
6531
6532 /* Configure byte swap and enable write to the reg_window registers.
6533 * Rely on the CPU to do target byte swapping on big endian systems;
6534 * the chip's target access swapping will not swap all accesses.
6535 */
6536 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6537 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6538 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6539
6540 bnx2_set_power_state(bp, PCI_D0);
6541
6542 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6543
6544 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6545 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6546 dev_err(&pdev->dev,
6547 "Cannot find PCIE capability, aborting.\n");
6548 rc = -EIO;
6549 goto err_out_unmap;
6550 }
6551 bp->flags |= PCIE_FLAG;
6552 } else {
6553 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6554 if (bp->pcix_cap == 0) {
6555 dev_err(&pdev->dev,
6556 "Cannot find PCIX capability, aborting.\n");
6557 rc = -EIO;
6558 goto err_out_unmap;
6559 }
6560 }
6561
6562 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6563 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6564 bp->flags |= MSI_CAP_FLAG;
6565 }
6566
6567 /* 5708 cannot support DMA addresses > 40-bit. */
6568 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6569 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6570 else
6571 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6572
6573 /* Configure DMA attributes. */
6574 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6575 dev->features |= NETIF_F_HIGHDMA;
6576 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6577 if (rc) {
6578 dev_err(&pdev->dev,
6579 "pci_set_consistent_dma_mask failed, aborting.\n");
6580 goto err_out_unmap;
6581 }
6582 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6583 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6584 goto err_out_unmap;
6585 }
6586
6587 if (!(bp->flags & PCIE_FLAG))
6588 bnx2_get_pci_speed(bp);
6589
6590 /* 5706A0 may falsely detect SERR and PERR. */
6591 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6592 reg = REG_RD(bp, PCI_COMMAND);
6593 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6594 REG_WR(bp, PCI_COMMAND, reg);
6595 } else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6597 !(bp->flags & PCIX_FLAG)) {
6599 dev_err(&pdev->dev,
6600 "5706 A1 can only be used in a PCIX bus, aborting.\n");
/* rc is still 0 on this path; return an error so the probe fails */
rc = -EPERM;
6601 goto err_out_unmap;
6602 }
6603
6604 bnx2_init_nvram(bp);
6605
6606 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6607
6608 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6609 BNX2_SHM_HDR_SIGNATURE_SIG) {
6610 u32 off = PCI_FUNC(pdev->devfn) << 2;
6611
6612 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6613 } else
6614 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6615
6616 /* Get the permanent MAC address. First we need to make sure the
6617 * firmware is actually running.
6618 */
6619 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6620
6621 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6622 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6623 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6624 rc = -ENODEV;
6625 goto err_out_unmap;
6626 }
6627
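/* Decode the bootcode revision into a printable "x.y.z" string. The
 * top three bytes of the BC_REV word each hold one decimal field; the
 * inner loop emits its digits while suppressing leading zeros.
 */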
6628 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6629 for (i = 0, j = 0; i < 3; i++) {
6630 u8 num, k, skip0;
6631
6632 num = (u8) (reg >> (24 - (i * 8)));
6633 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6634 if (num >= k || !skip0 || k == 1) {
6635 bp->fw_version[j++] = (num / k) + '0';
6636 skip0 = 0;
6637 }
6638 }
6639 if (i != 2)
6640 bp->fw_version[j++] = '.';
6641 }
6642 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
6643 BNX2_PORT_FEATURE_ASF_ENABLED) {
6644 bp->flags |= ASF_ENABLE_FLAG;
6645
6646 for (i = 0; i < 30; i++) {
6647 reg = REG_RD_IND(bp, bp->shmem_base +
6648 BNX2_BC_STATE_CONDITION);
6649 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6650 break;
6651 msleep(10);
6652 }
6653 }
6654 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6655 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6656 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6657 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6658 int i;
6659 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6660
6661 bp->fw_version[j++] = ' ';
6662 for (i = 0; i < 3; i++) {
6663 reg = REG_RD_IND(bp, addr + i * 4);
6664 reg = swab32(reg);
6665 memcpy(&bp->fw_version[j], &reg, 4);
6666 j += 4;
6667 }
6668 }
6669
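/* The permanent MAC address is kept in shared memory as two words:
 * MAC_UPPER holds bytes 0-1 and MAC_LOWER bytes 2-5, most significant
 * byte first within each word.
 */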
6670 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6671 bp->mac_addr[0] = (u8) (reg >> 8);
6672 bp->mac_addr[1] = (u8) reg;
6673
6674 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6675 bp->mac_addr[2] = (u8) (reg >> 24);
6676 bp->mac_addr[3] = (u8) (reg >> 16);
6677 bp->mac_addr[4] = (u8) (reg >> 8);
6678 bp->mac_addr[5] = (u8) reg;
6679
6680 bp->tx_ring_size = MAX_TX_DESC_CNT;
6681 bnx2_set_rx_ring_size(bp, 255);
6682
6683 bp->rx_csum = 1;
6684
6685 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6686
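/* Default interrupt coalescing. Roughly: an interrupt fires once
 * either the quick-consumer-trip count of completions or the tick
 * timeout is reached. These defaults are tunable at runtime with
 * "ethtool -C".
 */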
6687 bp->tx_quick_cons_trip_int = 20;
6688 bp->tx_quick_cons_trip = 20;
6689 bp->tx_ticks_int = 80;
6690 bp->tx_ticks = 80;
6691
6692 bp->rx_quick_cons_trip_int = 6;
6693 bp->rx_quick_cons_trip = 6;
6694 bp->rx_ticks_int = 18;
6695 bp->rx_ticks = 18;
6696
6697 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6698
6699 bp->timer_interval = HZ;
6700 bp->current_interval = HZ;
6701
6702 bp->phy_addr = 1;
6703
6704 /* Disable WOL support if we are running on a SERDES chip. */
6705 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6706 bnx2_get_5709_media(bp);
6707 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6708 bp->phy_flags |= PHY_SERDES_FLAG;
6709
6710 bp->phy_port = PORT_TP;
6711 if (bp->phy_flags & PHY_SERDES_FLAG) {
6712 bp->phy_port = PORT_FIBRE;
6713 bp->flags |= NO_WOL_FLAG;
6714 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6715 bp->phy_addr = 2;
6716 reg = REG_RD_IND(bp, bp->shmem_base +
6717 BNX2_SHARED_HW_CFG_CONFIG);
6718 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6719 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6720 }
6721 bnx2_init_remote_phy(bp);
6722
6723 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6724 CHIP_NUM(bp) == CHIP_NUM_5708)
6725 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6726 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6727 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6728
6729 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6730 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6731 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6732 bp->flags |= NO_WOL_FLAG;
6733
6734 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6735 bp->tx_quick_cons_trip_int =
6736 bp->tx_quick_cons_trip;
6737 bp->tx_ticks_int = bp->tx_ticks;
6738 bp->rx_quick_cons_trip_int =
6739 bp->rx_quick_cons_trip;
6740 bp->rx_ticks_int = bp->rx_ticks;
6741 bp->comp_prod_trip_int = bp->comp_prod_trip;
6742 bp->com_ticks_int = bp->com_ticks;
6743 bp->cmd_ticks_int = bp->cmd_ticks;
6744 }
6745
6746 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6747 *
6748 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6749 * with byte enables disabled on the unused 32-bit word. This is legal
6750 * but causes problems on the AMD 8132 which will eventually stop
6751 * responding after a while.
6752 *
6753 * AMD believes this incompatibility is unique to the 5706, and
6754 * prefers to locally disable MSI rather than globally disabling it.
6755 */
6756 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6757 struct pci_dev *amd_8132 = NULL;
6758
6759 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6760 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6761 amd_8132))) {
6762
6763 if (amd_8132->revision >= 0x10 &&
6764 amd_8132->revision <= 0x13) {
6765 disable_msi = 1;
6766 pci_dev_put(amd_8132);
6767 break;
6768 }
6769 }
6770 }
6771
6772 bnx2_set_default_link(bp);
6773 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6774
6775 init_timer(&bp->timer);
6776 bp->timer.expires = RUN_AT(bp->timer_interval);
6777 bp->timer.data = (unsigned long) bp;
6778 bp->timer.function = bnx2_timer;
6779
6780 return 0;
6781
6782 err_out_unmap:
6783 if (bp->regview) {
6784 iounmap(bp->regview);
6785 bp->regview = NULL;
6786 }
6787
6788 err_out_release:
6789 pci_release_regions(pdev);
6790
6791 err_out_disable:
6792 pci_disable_device(pdev);
6793 pci_set_drvdata(pdev, NULL);
6794
6795 err_out:
6796 return rc;
6797 }
6798
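/* Format a human-readable bus description into str, e.g.
 * "PCI Express" or "PCI-X 64-bit 133MHz", for the probe banner.
 */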
6799 static char * __devinit
6800 bnx2_bus_string(struct bnx2 *bp, char *str)
6801 {
6802 char *s = str;
6803
6804 if (bp->flags & PCIE_FLAG) {
6805 s += sprintf(s, "PCI Express");
6806 } else {
6807 s += sprintf(s, "PCI");
6808 if (bp->flags & PCIX_FLAG)
6809 s += sprintf(s, "-X");
6810 if (bp->flags & PCI_32BIT_FLAG)
6811 s += sprintf(s, " 32-bit");
6812 else
6813 s += sprintf(s, " 64-bit");
6814 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6815 }
6816 return str;
6817 }
6818
6819 static int __devinit
6820 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6821 {
6822 static int version_printed = 0;
6823 struct net_device *dev = NULL;
6824 struct bnx2 *bp;
6825 int rc, i;
6826 char str[40];
6827
6828 if (version_printed++ == 0)
6829 printk(KERN_INFO "%s", version);
6830
6831 /* dev zeroed in init_etherdev */
6832 dev = alloc_etherdev(sizeof(*bp));
6833
6834 if (!dev)
6835 return -ENOMEM;
6836
6837 rc = bnx2_init_board(pdev, dev);
6838 if (rc < 0) {
6839 free_netdev(dev);
6840 return rc;
6841 }
6842
6843 dev->open = bnx2_open;
6844 dev->hard_start_xmit = bnx2_start_xmit;
6845 dev->stop = bnx2_close;
6846 dev->get_stats = bnx2_get_stats;
6847 dev->set_multicast_list = bnx2_set_rx_mode;
6848 dev->do_ioctl = bnx2_ioctl;
6849 dev->set_mac_address = bnx2_change_mac_addr;
6850 dev->change_mtu = bnx2_change_mtu;
6851 dev->tx_timeout = bnx2_tx_timeout;
6852 dev->watchdog_timeo = TX_TIMEOUT;
6853 #ifdef BCM_VLAN
6854 dev->vlan_rx_register = bnx2_vlan_rx_register;
6855 #endif
6856 dev->poll = bnx2_poll;
6857 dev->ethtool_ops = &bnx2_ethtool_ops;
6858 dev->weight = 64;
6859
6860 bp = netdev_priv(dev);
6861
6862 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6863 dev->poll_controller = poll_bnx2;
6864 #endif
6865
6866 pci_set_drvdata(pdev, dev);
6867
6868 memcpy(dev->dev_addr, bp->mac_addr, 6);
6869 memcpy(dev->perm_addr, bp->mac_addr, 6);
6870 bp->name = board_info[ent->driver_data].name;
6871
6872 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6873 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6874 dev->features |= NETIF_F_IPV6_CSUM;
6875
6876 #ifdef BCM_VLAN
6877 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6878 #endif
6879 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6880 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6881 dev->features |= NETIF_F_TSO6;
6882
6883 if ((rc = register_netdev(dev))) {
6884 dev_err(&pdev->dev, "Cannot register net device\n");
6885 if (bp->regview)
6886 iounmap(bp->regview);
6887 pci_release_regions(pdev);
6888 pci_disable_device(pdev);
6889 pci_set_drvdata(pdev, NULL);
6890 free_netdev(dev);
6891 return rc;
6892 }
6893
6894 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6895 "IRQ %d, ",
6896 dev->name,
6897 bp->name,
6898 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6899 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6900 bnx2_bus_string(bp, str),
6901 dev->base_addr,
6902 bp->pdev->irq);
6903
6904 printk("node addr ");
6905 for (i = 0; i < 6; i++)
6906 printk("%2.2x", dev->dev_addr[i]);
6907 printk("\n");
6908
6909 return 0;
6910 }
6911
6912 static void __devexit
6913 bnx2_remove_one(struct pci_dev *pdev)
6914 {
6915 struct net_device *dev = pci_get_drvdata(pdev);
6916 struct bnx2 *bp = netdev_priv(dev);
6917
6918 flush_scheduled_work();
6919
6920 unregister_netdev(dev);
6921
6922 if (bp->regview)
6923 iounmap(bp->regview);
6924
6925 free_netdev(dev);
6926 pci_release_regions(pdev);
6927 pci_disable_device(pdev);
6928 pci_set_drvdata(pdev, NULL);
6929 }
6930
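/* On suspend the chip is reset with a code telling the bootcode
 * whether to keep the link alive for Wake-on-LAN, before the device
 * is put into the chosen low-power state.
 */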
6931 static int
6932 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6933 {
6934 struct net_device *dev = pci_get_drvdata(pdev);
6935 struct bnx2 *bp = netdev_priv(dev);
6936 u32 reset_code;
6937
6938 /* PCI register 4 needs to be saved whether netif_running() or not.
6939 * MSI address and data need to be saved if using MSI and
6940 * netif_running().
6941 */
6942 pci_save_state(pdev);
6943 if (!netif_running(dev))
6944 return 0;
6945
6946 flush_scheduled_work();
6947 bnx2_netif_stop(bp);
6948 netif_device_detach(dev);
6949 del_timer_sync(&bp->timer);
6950 if (bp->flags & NO_WOL_FLAG)
6951 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6952 else if (bp->wol)
6953 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6954 else
6955 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6956 bnx2_reset_chip(bp, reset_code);
6957 bnx2_free_skbs(bp);
6958 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6959 return 0;
6960 }
6961
6962 static int
6963 bnx2_resume(struct pci_dev *pdev)
6964 {
6965 struct net_device *dev = pci_get_drvdata(pdev);
6966 struct bnx2 *bp = netdev_priv(dev);
6967
6968 pci_restore_state(pdev);
6969 if (!netif_running(dev))
6970 return 0;
6971
6972 bnx2_set_power_state(bp, PCI_D0);
6973 netif_device_attach(dev);
6974 bnx2_init_nic(bp);
6975 bnx2_netif_start(bp);
6976 return 0;
6977 }
6978
6979 static struct pci_driver bnx2_pci_driver = {
6980 .name = DRV_MODULE_NAME,
6981 .id_table = bnx2_pci_tbl,
6982 .probe = bnx2_init_one,
6983 .remove = __devexit_p(bnx2_remove_one),
6984 .suspend = bnx2_suspend,
6985 .resume = bnx2_resume,
6986 };
6987
6988 static int __init bnx2_init(void)
6989 {
6990 return pci_register_driver(&bnx2_pci_driver);
6991 }
6992
6993 static void __exit bnx2_cleanup(void)
6994 {
6995 pci_unregister_driver(&bnx2_pci_driver);
6996 }
6997
6998 module_init(bnx2_init);
6999 module_exit(bnx2_cleanup);