dm mpath: rdac
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
f2a4f052 43#include <net/checksum.h>
f2a4f052
MC
44#include <linux/workqueue.h>
45#include <linux/crc32.h>
46#include <linux/prefetch.h>
29b12174 47#include <linux/cache.h>
fba9fe91 48#include <linux/zlib.h>
f2a4f052 49
b6016b76
MC
50#include "bnx2.h"
51#include "bnx2_fw.h"
d43584c8 52#include "bnx2_fw2.h"
b6016b76
MC
53
/* Driver identity strings used in log messages and ethtool output. */
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.6.2"
#define DRV_MODULE_RELDATE	"July 6, 2007"

/* Convert a delay in jiffies into an absolute timer expiry. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts
 * instead of MSI (useful on systems with broken MSI support).
 */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
76
/* Board identifiers; used as the driver_data index into board_info[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
103
/* PCI IDs handled by this driver.  HP OEM boards (NC370x) are matched
 * by subsystem vendor/device before the generic Broadcom entries, since
 * the table is scanned in order.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
125
/* Supported NVRAM/flash devices.  Each entry describes one strapping
 * option: the first field encodes the strap value, followed by the
 * config/write registers, buffered-flag, page geometry, address mask,
 * total size, and a human-readable name.  "Expansion" entries are
 * placeholders for straps with no known part.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
214
e89bbf10
MC
215static inline u32 bnx2_tx_avail(struct bnx2 *bp)
216{
2f8af120 217 u32 diff;
e89bbf10 218
2f8af120 219 smp_mb();
faac9c4b
MC
220
221 /* The ring uses 256 indices for 255 entries, one of them
222 * needs to be skipped.
223 */
224 diff = bp->tx_prod - bp->tx_cons;
225 if (unlikely(diff >= TX_DESC_CNT)) {
226 diff &= 0xffff;
227 if (diff == TX_DESC_CNT)
228 diff = MAX_TX_DESC_CNT;
229 }
e89bbf10
MC
230 return (bp->tx_ring_size - diff);
231}
232
b6016b76
MC
/* Read a chip register indirectly through the PCI config window.
 * The window address/data register pair is a shared resource, so
 * indirect_lock serializes the two-step access.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

/* Write a chip register indirectly through the PCI config window;
 * same locking rules as bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
253
254static void
255bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
256{
257 offset += cid_addr;
1b8227c4 258 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
259 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
260 int i;
261
262 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
263 REG_WR(bp, BNX2_CTX_CTX_CTRL,
264 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
265 for (i = 0; i < 5; i++) {
266 u32 val;
267 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
268 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
269 break;
270 udelay(5);
271 }
272 } else {
273 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
274 REG_WR(bp, BNX2_CTX_DATA, val);
275 }
1b8227c4 276 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
277}
278
/* Read PHY register 'reg' over the MDIO bus into *val.
 *
 * If hardware auto-polling of the PHY is enabled it must be turned
 * off around the manual MDIO transaction and re-enabled afterwards.
 * Returns 0 on success, -EBUSY (and *val = 0) if the transaction
 * does not complete within the poll budget (~500us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Temporarily disable auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Issue the read command: PHY address, register, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion; the data is valid once START_BUSY clears. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
335
/* Write 'val' to PHY register 'reg' over the MDIO bus.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual transaction.  Returns 0 on success, -EBUSY if the write
 * does not complete within the poll budget (~500us).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Temporarily disable auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Issue the write command with the data in the low 16 bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	return ret;
}
384
/* Mask the device interrupt; the read back flushes the posted write. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

/* Unmask the device interrupt and ack up to last_status_idx.  The
 * final COAL_NOW kick forces the host coalescing block to generate
 * an interrupt if any events were missed while masked.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

/* Disable the interrupt and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
413
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the
 * stack does not mistake the stoppage for a tx timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}

/* Undo bnx2_netif_stop().  Only re-enables when the nested
 * disable count (intr_sem) drops back to zero.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
436
/* Release all DMA and host memory owned by the device: 5709 context
 * pages, the combined status+statistics block, and the tx/rx
 * descriptor and buffer rings.  Safe to call on a partially
 * allocated state (each pointer is checked and NULLed), which is why
 * bnx2_alloc_mem() uses it as its error-unwind path.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		/* status_blk and stats_blk share one allocation. */
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
475
/* Allocate all DMA and host memory for the device: tx/rx descriptor
 * rings and their software shadow rings, the combined
 * status+statistics block, and (5709 only) the context pages.
 * Returns 0 on success or -ENOMEM after freeing any partial
 * allocations via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx shadow ring can be large (rx_max_ring pages), so it
	 * comes from vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats_blk lives immediately after the cache-aligned status
	 * block within the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8kB of context memory, split into BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
546
e3648b3d
MC
/* Publish the current link state (speed/duplex/autoneg status) to the
 * bootcode via the shared memory LINK_STATUS word, so management
 * firmware sees the same link the driver does.  Skipped when a remote
 * PHY is managed by the firmware itself.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice for current state. */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
605
9b1084b8
MC
606static char *
607bnx2_xceiver_str(struct bnx2 *bp)
608{
609 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
610 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
611 "Copper"));
612}
613
b6016b76
MC
/* Log the link state to the console, toggle the net-stack carrier
 * accordingly, and forward the state to the bootcode via
 * bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
650
/* Determine the tx/rx pause configuration (bp->flow_ctrl) from the
 * negotiated or forced settings.  When flow control is not being
 * autonegotiated, the requested setting is used directly (full duplex
 * only).  Otherwise the local and partner advertisements are resolved
 * per the 802.3 priority table; 5708 SerDes reports the resolved
 * result directly in a status register.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes: hardware already resolved the pause bits. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000Base-X pause bits onto the copper-style
		 * PAUSE_CAP/PAUSE_ASYM encoding so one resolution
		 * table below handles both media types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
726
27a005b8
MC
/* Record link-up speed/duplex for the 5709 SerDes PHY by reading the
 * GP_STATUS block (selected/deselected around the read).  Forced
 * settings are copied straight from the requested values.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
765
/* Record link-up speed/duplex for the 5708 SerDes PHY from its
 * 1000X_STAT1 status register.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
794
/* Record link-up speed/duplex for the 5706 SerDes PHY.  The link is
 * always 1000 Mbps; duplex comes from BMCR when forced, or from the
 * common local/partner 1000X advertisement when autonegotiated.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
831
/* Record link-up speed/duplex for a copper PHY.  With autoneg, the
 * 1000BASE-T registers are checked first, then the 10/100
 * advertisement; without autoneg, speed/duplex come from BMCR.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 LP bits are offset 2 from the CTRL1000
		 * advertisement bits; shift to align before ANDing.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
897
/* Program the EMAC to match the resolved link parameters: inter-frame
 * gap, port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 * Finishes by acking the link-change interrupt.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default IPG; 1000HD needs a larger value (0x26ff). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
964
27a005b8
MC
/* On 5709 SerDes, bp->mii_bmsr1 lives in the GP_STATUS block; select
 * that block before reading it.  No-op on other PHYs.
 */
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

/* Restore the default COMBO_IEEEB0 block after bnx2_enable_bmsr1(). */
static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
982
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if the PHY is
 * 2.5G-capable.  On 5709 the OVER1G block must be selected around the
 * access.  Returns 1 if 2.5G was already enabled, 0 if this call
 * enabled it (i.e. the advertisement changed).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

/* Disable 2.5G advertisement in the PHY's UP1 register.  Returns 1 if
 * 2.5G had been enabled (i.e. the advertisement changed), 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1037
1038static void
1039bnx2_enable_forced_2g5(struct bnx2 *bp)
1040{
1041 u32 bmcr;
1042
1043 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1044 return;
1045
27a005b8
MC
1046 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1047 u32 val;
1048
1049 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1050 MII_BNX2_BLK_ADDR_SERDES_DIG);
1051 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1052 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1053 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1054 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1055
1056 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1057 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1058 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1059
1060 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1061 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1062 bmcr |= BCM5708S_BMCR_FORCE_2500;
1063 }
1064
1065 if (bp->autoneg & AUTONEG_SPEED) {
1066 bmcr &= ~BMCR_ANENABLE;
1067 if (bp->req_duplex == DUPLEX_FULL)
1068 bmcr |= BMCR_FULLDPLX;
1069 }
1070 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1071}
1072
1073static void
1074bnx2_disable_forced_2g5(struct bnx2 *bp)
1075{
1076 u32 bmcr;
1077
1078 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079 return;
1080
27a005b8
MC
1081 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082 u32 val;
1083
1084 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085 MII_BNX2_BLK_ADDR_SERDES_DIG);
1086 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087 val &= ~MII_BNX2_SD_MISC1_FORCE;
1088 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1089
1090 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1092 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1093
1094 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1095 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1096 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1097 }
1098
1099 if (bp->autoneg & AUTONEG_SPEED)
1100 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1101 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1102}
1103
b6016b76
MC
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * In loopback modes the link is simply forced up; with a
 * firmware-managed remote PHY, link handling is left to the firmware.
 * Dispatches to the chip-specific linkup helper on link-up, and logs
 * a transition via bnx2_report_link() when the state changed.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read twice (with the 5709 GP_STATUS block
	 * selected around the access) to get the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: BMSR link bit is unreliable; trust the
		 * EMAC link status instead.
		 */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G so autoneg can
		 * renegotiate from scratch.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1169
/* Soft-reset the PHY through BMCR and poll until the reset bit
 * self-clears.  Returns 0 on success, -EBUSY if the PHY stays in reset
 * after ~1 ms (100 polls x 10 us).
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Extra settle time once reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1193
1194static u32
1195bnx2_phy_get_pause_adv(struct bnx2 *bp)
1196{
1197 u32 adv = 0;
1198
1199 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1200 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1201
1202 if (bp->phy_flags & PHY_SERDES_FLAG) {
1203 adv = ADVERTISE_1000XPAUSE;
1204 }
1205 else {
1206 adv = ADVERTISE_PAUSE_CAP;
1207 }
1208 }
1209 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1210 if (bp->phy_flags & PHY_SERDES_FLAG) {
1211 adv = ADVERTISE_1000XPSE_ASYM;
1212 }
1213 else {
1214 adv = ADVERTISE_PAUSE_ASYM;
1215 }
1216 }
1217 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1218 if (bp->phy_flags & PHY_SERDES_FLAG) {
1219 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1220 }
1221 else {
1222 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1223 }
1224 }
1225 return adv;
1226}
1227
0d8a6571
MC
1228static int bnx2_fw_sync(struct bnx2 *, u32, int);
1229
b6016b76 1230static int
0d8a6571
MC
1231bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1232{
1233 u32 speed_arg = 0, pause_adv;
1234
1235 pause_adv = bnx2_phy_get_pause_adv(bp);
1236
1237 if (bp->autoneg & AUTONEG_SPEED) {
1238 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1239 if (bp->advertising & ADVERTISED_10baseT_Half)
1240 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1241 if (bp->advertising & ADVERTISED_10baseT_Full)
1242 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1243 if (bp->advertising & ADVERTISED_100baseT_Half)
1244 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1245 if (bp->advertising & ADVERTISED_100baseT_Full)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1247 if (bp->advertising & ADVERTISED_1000baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1249 if (bp->advertising & ADVERTISED_2500baseX_Full)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1251 } else {
1252 if (bp->req_line_speed == SPEED_2500)
1253 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1254 else if (bp->req_line_speed == SPEED_1000)
1255 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1256 else if (bp->req_line_speed == SPEED_100) {
1257 if (bp->req_duplex == DUPLEX_FULL)
1258 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1259 else
1260 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1261 } else if (bp->req_line_speed == SPEED_10) {
1262 if (bp->req_duplex == DUPLEX_FULL)
1263 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1264 else
1265 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1266 }
1267 }
1268
1269 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1270 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1271 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1272 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1273
1274 if (port == PORT_TP)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1276 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1277
1278 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1279
1280 spin_unlock_bh(&bp->phy_lock);
1281 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1282 spin_lock_bh(&bp->phy_lock);
1283
1284 return 0;
1285}
1286
/* Configure the SerDes PHY per bp->autoneg / bp->req_* / bp->advertising.
 * Handles both forced-speed and autoneg modes, including the chip-specific
 * 2.5G force mechanisms on 5708/5709.  Caller holds bp->phy_lock; it is
 * dropped around the msleep below.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured via the mailbox instead. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* ---- Forced speed path ---- */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle 2.5G capability to match the requested speed; if
		 * the capability state actually changed, the link must be
		 * bounced so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is an undocumented
				 * BMCR bit cleared for forced 1G on 5709 —
				 * confirm against chip documentation.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* ---- Autonegotiation path ---- */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1401
/* Advertisement masks used by the ethtool speed-setting paths.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands to an expression that reads a
 * variable named 'bp' at the point of use.
 */
#define ETHTOOL_ALL_FIBRE_SPEED \
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
	(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks (10/100 includes the CSMA selector). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1416
0d8a6571
MC
1417static void
1418bnx2_set_default_remote_link(struct bnx2 *bp)
1419{
1420 u32 link;
1421
1422 if (bp->phy_port == PORT_TP)
1423 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1424 else
1425 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1426
1427 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1428 bp->req_line_speed = 0;
1429 bp->autoneg |= AUTONEG_SPEED;
1430 bp->advertising = ADVERTISED_Autoneg;
1431 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1432 bp->advertising |= ADVERTISED_10baseT_Half;
1433 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1434 bp->advertising |= ADVERTISED_10baseT_Full;
1435 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1436 bp->advertising |= ADVERTISED_100baseT_Half;
1437 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1438 bp->advertising |= ADVERTISED_100baseT_Full;
1439 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1440 bp->advertising |= ADVERTISED_1000baseT_Full;
1441 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1442 bp->advertising |= ADVERTISED_2500baseX_Full;
1443 } else {
1444 bp->autoneg = 0;
1445 bp->advertising = 0;
1446 bp->req_duplex = DUPLEX_FULL;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1448 bp->req_line_speed = SPEED_10;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1450 bp->req_duplex = DUPLEX_HALF;
1451 }
1452 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1453 bp->req_line_speed = SPEED_100;
1454 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1455 bp->req_duplex = DUPLEX_HALF;
1456 }
1457 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1458 bp->req_line_speed = SPEED_1000;
1459 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1460 bp->req_line_speed = SPEED_2500;
1461 }
1462}
1463
deaf391b
MC
/* Establish the driver's default link settings: full autoneg by default,
 * or a forced 1G full-duplex SerDes link if the hardware config in shared
 * memory demands it.  Remote-PHY devices delegate to the firmware config.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Shared memory may force a fixed 1G default on SerDes. */
		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1487
df149d70
MC
/* Write the next driver-pulse sequence number to the firmware pulse
 * mailbox so firmware knows the driver is alive.  Uses the PCI config
 * register window directly, serialized by indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	/* Window write: set the target address, then the data. */
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1501
0d8a6571
MC
/* Handle a link-status event from the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS word from shared memory into link state,
 * speed, duplex, flow control and PHY port, then reports and applies the
 * result to the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Firmware flags an expired heartbeat in the same word; answer it
	 * and strip the flag before decoding link state.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case overrides the duplex then deliberately
		 * falls through to the shared speed assignment below it.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Not fully autonegotiated: use the configured
			 * flow control (full duplex only).
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Firmware may switch between SerDes and TP media; reload
		 * the defaults when the port type changes.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1580
1581static int
1582bnx2_set_remote_link(struct bnx2 *bp)
1583{
1584 u32 evt_code;
1585
1586 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1587 switch (evt_code) {
1588 case BNX2_FW_EVT_CODE_LINK_EVENT:
1589 bnx2_remote_phy_event(bp);
1590 break;
1591 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1592 default:
df149d70 1593 bnx2_send_heart_beat(bp);
0d8a6571
MC
1594 break;
1595 }
1596 return 0;
1597}
1598
b6016b76
MC
/* Configure the copper PHY per bp->autoneg / bp->req_* / bp->advertising.
 * In autoneg mode, rewrites the advertisement registers and restarts AN
 * only when something changed.  In forced mode, bounces the link when a
 * new BMCR value must take effect.  Caller holds bp->phy_lock; it is
 * dropped around the msleep below.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement plus pause bits. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* ---- Forced speed/duplex path ---- */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Double read — presumably to clear the latched link bit. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1695
1696static int
0d8a6571 1697bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1698{
1699 if (bp->loopback == MAC_LOOPBACK)
1700 return 0;
1701
1702 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1703 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1704 }
1705 else {
1706 return (bnx2_setup_copper_phy(bp));
1707 }
1708}
1709
27a005b8
MC
1710static int
1711bnx2_init_5709s_phy(struct bnx2 *bp)
1712{
1713 u32 val;
1714
1715 bp->mii_bmcr = MII_BMCR + 0x10;
1716 bp->mii_bmsr = MII_BMSR + 0x10;
1717 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1718 bp->mii_adv = MII_ADVERTISE + 0x10;
1719 bp->mii_lpa = MII_LPA + 0x10;
1720 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1721
1722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1723 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1724
1725 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1726 bnx2_reset_phy(bp);
1727
1728 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1729
1730 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1731 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1732 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1733 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1734
1735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1736 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1737 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1738 val |= BCM5708S_UP1_2G5;
1739 else
1740 val &= ~BCM5708S_UP1_2G5;
1741 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1742
1743 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1744 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1745 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1746 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1747
1748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1749
1750 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1751 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1752 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1753
1754 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1755
1756 return 0;
1757}
1758
/* One-time initialization of the 5708 SerDes PHY: fiber mode, optional
 * 2.5G capability, plus board-specific TX amplitude tweaks pulled from
 * shared memory.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared memory; applied only
	 * on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1816
/* One-time initialization of the 5706 SerDes PHY, including the
 * jumbo-frame (extended packet length) register tweaks.  Always
 * returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1853
/* One-time initialization of the copper PHY: optional CRC workaround and
 * early-DAC disable sequences, jumbo-frame register setup, and
 * ethernet@wirespeed enable.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-documented shadow/expansion register sequence;
		 * raw register numbers as supplied by Broadcom.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Clear bit 8 in DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1904
1905
/* Top-level PHY initialization: set default MII register offsets, read
 * the PHY id, run the chip-specific init routine, then apply the current
 * link configuration.  Remote-PHY devices skip local probing entirely.
 * Returns 0 on success or the error from the chip-specific init.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default MII register map; 5709 SerDes overrides these below. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Compose the 32-bit PHY id from the two id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1949
1950static int
1951bnx2_set_mac_loopback(struct bnx2 *bp)
1952{
1953 u32 mac_mode;
1954
1955 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1956 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1957 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1958 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1959 bp->link_up = 1;
1960 return 0;
1961}
1962
bc5a0690
MC
1963static int bnx2_test_link(struct bnx2 *);
1964
/* Put the PHY into loopback (1G full duplex), wait for the internal link
 * to settle, then program the EMAC for GMII with loopback/force bits
 * cleared.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll up to ~1 s for the loopback link to come up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1994
/* Send a command to the bootcode firmware through the driver mailbox and
 * wait for its acknowledgement.
 *
 * @msg_data: command/flag word; a sequence number is OR'd in here.
 * @silent:   suppress the timeout error message when non-zero.
 *
 * Returns 0 on success (or when the command asked for no wait),
 * -EBUSY on ack timeout, -EIO when firmware reports failure.
 * Sleeps (msleep) — must be called from process context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 commands do not require the firmware to complete. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2037
59b47d8a
MC
2038static int
2039bnx2_init_5709_context(struct bnx2 *bp)
2040{
2041 int i, ret = 0;
2042 u32 val;
2043
2044 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2045 val |= (BCM_PAGE_BITS - 8) << 16;
2046 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2047 for (i = 0; i < 10; i++) {
2048 val = REG_RD(bp, BNX2_CTX_COMMAND);
2049 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2050 break;
2051 udelay(2);
2052 }
2053 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2054 return -EBUSY;
2055
59b47d8a
MC
2056 for (i = 0; i < bp->ctx_pages; i++) {
2057 int j;
2058
2059 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2060 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2061 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2062 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2063 (u64) bp->ctx_blk_mapping[i] >> 32);
2064 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2065 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2066 for (j = 0; j < 10; j++) {
2067
2068 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2069 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2070 break;
2071 udelay(5);
2072 }
2073 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2074 ret = -EBUSY;
2075 break;
2076 }
2077 }
2078 return ret;
2079}
2080
b6016b76
MC
2081static void
2082bnx2_init_context(struct bnx2 *bp)
2083{
2084 u32 vcid;
2085
2086 vcid = 96;
2087 while (vcid) {
2088 u32 vcid_addr, pcid_addr, offset;
7947b20e 2089 int i;
b6016b76
MC
2090
2091 vcid--;
2092
2093 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2094 u32 new_vcid;
2095
2096 vcid_addr = GET_PCID_ADDR(vcid);
2097 if (vcid & 0x8) {
2098 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2099 }
2100 else {
2101 new_vcid = vcid;
2102 }
2103 pcid_addr = GET_PCID_ADDR(new_vcid);
2104 }
2105 else {
2106 vcid_addr = GET_CID_ADDR(vcid);
2107 pcid_addr = vcid_addr;
2108 }
2109
7947b20e
MC
2110 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2111 vcid_addr += (i << PHY_CTX_SHIFT);
2112 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2113
7947b20e
MC
2114 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2115 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2116
7947b20e
MC
2117 /* Zero out the context. */
2118 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2119 CTX_WR(bp, 0x00, offset, 0);
2120
2121 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2122 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2123 }
b6016b76
MC
2124 }
2125}
2126
/* Work around bad RX buffer memory blocks: allocate every mbuf from the
 * chip's pool, remember the good ones (bit 9 clear), and free only those
 * back — permanently retiring the bad blocks.  Returns 0 on success,
 * -ENOMEM if the temporary array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2177
2178static void
6aa20a22 2179bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2180{
2181 u32 val;
2182 u8 *mac_addr = bp->dev->dev_addr;
2183
2184 val = (mac_addr[0] << 8) | mac_addr[1];
2185
2186 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2187
6aa20a22 2188 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2189 (mac_addr[4] << 8) | mac_addr[5];
2190
2191 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2192}
2193
/* Allocate a fresh rx skb for ring slot @index, DMA-map it, and publish
 * its bus address in the corresponding rx buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.  On
 * success bp->rx_prod_bseq is advanced by the buffer size so the
 * byte-sequence mailbox stays in step with the producer index.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Record the skb and its mapping so the completion path can unmap. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Give the 64-bit bus address to the hardware descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2224
da3e4fbe
MC
2225static int
2226bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2227{
da3e4fbe 2228 struct status_block *sblk = bp->status_blk;
b6016b76 2229 u32 new_link_state, old_link_state;
da3e4fbe 2230 int is_set = 1;
b6016b76 2231
da3e4fbe
MC
2232 new_link_state = sblk->status_attn_bits & event;
2233 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2234 if (new_link_state != old_link_state) {
da3e4fbe
MC
2235 if (new_link_state)
2236 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2237 else
2238 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2239 } else
2240 is_set = 0;
2241
2242 return is_set;
2243}
2244
/* Service PHY-related attention events from the status block: a link
 * state change (handled under phy_lock) and a remote-PHY timer abort.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		/* phy_lock serializes link handling with other PHY users. */
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2257
/* Reclaim completed tx descriptors: walk the tx ring from the software
 * consumer up to the hardware consumer index from the status block,
 * unmapping and freeing each transmitted skb, then wake the tx queue if
 * it was stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip over the reserved last entry of a ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the skipped last entry on page wrap. */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around:
			 * stop if the packet's last BD is not done yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page of the packet. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer: more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue under netif_tx_lock, re-checking the condition to
	 * close the race with a concurrent bnx2_start_xmit() stopping it.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2345
/* Recycle an rx skb: hand the buffer at ring slot @cons back to the
 * hardware at producer slot @prod without reallocating.  Used when a
 * packet had errors or a replacement skb could not be allocated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (CPU-synced) header area back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	/* Move the DMA mapping bookkeeping to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy the bus address into the producer's hardware descriptor. */
	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2375
/* NAPI rx handler: process up to @budget completed rx packets, passing
 * good ones up the stack and recycling or replacing ring buffers, then
 * tell the hardware the new producer index and byte sequence.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip over the reserved last entry of a ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header area for inspection. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with length and status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip CRC */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: hand the full skb up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle, drop packet. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust hardware checksum only when no xsum errors flagged. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2523
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache line the poll handler will read first. */
	prefetch(bp->status_blk);

	/* Mask further interrupts until NAPI polling re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2546
8e6a72c4
MC
2547static irqreturn_t
2548bnx2_msi_1shot(int irq, void *dev_instance)
2549{
2550 struct net_device *dev = dev_instance;
2551 struct bnx2 *bp = netdev_priv(dev);
2552
2553 prefetch(bp->status_blk);
2554
2555 /* Return here if interrupt is disabled. */
2556 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2557 return IRQ_HANDLED;
2558
2559 netif_rx_schedule(dev);
2560
2561 return IRQ_HANDLED;
2562}
2563
/* INTx (legacy, possibly shared) interrupt handler.  Detects whether the
 * interrupt is really ours, deasserts it, and schedules NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt (shared line) */

	/* Mask further interrupts until NAPI polling re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are servicing before scheduling. */
	if (netif_rx_schedule_prep(dev)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev);
	}

	return IRQ_HANDLED;
}
2602
0d8a6571
MC
2603#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2604 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2605
f4e418f7
MC
2606static inline int
2607bnx2_has_work(struct bnx2 *bp)
2608{
2609 struct status_block *sblk = bp->status_blk;
2610
2611 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2612 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2613 return 1;
2614
da3e4fbe
MC
2615 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2616 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2617 return 1;
2618
2619 return 0;
2620}
2621
b6016b76
MC
/* NAPI poll handler: service attention events, reclaim tx completions,
 * process rx packets within the budget, then either complete polling and
 * re-enable interrupts (return 0) or ask to be polled again (return 1).
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Honor both the global budget and this device's quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack write re-enables the interrupt. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: write twice on purpose -- first ack with the line
		 * still masked, then unmask by writing without MASK_INT.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	/* Budget exhausted or more work pending: keep polling. */
	return 1;
}
2682
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC receive mode (promiscuous / all-multicast / multicast
 * hash filter) and the RPM sort-user registers from dev->flags and the
 * device multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware unless VLANs or ASF need them kept. */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low CRC byte selects one of 256 hash bits,
			 * spread across 8 32-bit registers.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the RX_MODE register when it actually changes. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort-user configuration. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2757
fba9fe91
MC
2758#define FW_BUF_SIZE 0x8000
2759
2760static int
2761bnx2_gunzip_init(struct bnx2 *bp)
2762{
2763 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2764 goto gunzip_nomem1;
2765
2766 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2767 goto gunzip_nomem2;
2768
2769 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2770 if (bp->strm->workspace == NULL)
2771 goto gunzip_nomem3;
2772
2773 return 0;
2774
2775gunzip_nomem3:
2776 kfree(bp->strm);
2777 bp->strm = NULL;
2778
2779gunzip_nomem2:
2780 vfree(bp->gunzip_buf);
2781 bp->gunzip_buf = NULL;
2782
2783gunzip_nomem1:
2784 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2785 "uncompression.\n", bp->dev->name);
2786 return -ENOMEM;
2787}
2788
2789static void
2790bnx2_gunzip_end(struct bnx2 *bp)
2791{
2792 kfree(bp->strm->workspace);
2793
2794 kfree(bp->strm);
2795 bp->strm = NULL;
2796
2797 if (bp->gunzip_buf) {
2798 vfree(bp->gunzip_buf);
2799 bp->gunzip_buf = NULL;
2800 }
2801}
2802
2803static int
2804bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2805{
2806 int n, rc;
2807
2808 /* check gzip header */
2809 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2810 return -EINVAL;
2811
2812 n = 10;
2813
2814#define FNAME 0x8
2815 if (zbuf[3] & FNAME)
2816 while ((zbuf[n++] != 0) && (n < len));
2817
2818 bp->strm->next_in = zbuf + n;
2819 bp->strm->avail_in = len - n;
2820 bp->strm->next_out = bp->gunzip_buf;
2821 bp->strm->avail_out = FW_BUF_SIZE;
2822
2823 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2824 if (rc != Z_OK)
2825 return rc;
2826
2827 rc = zlib_inflate(bp->strm, Z_FINISH);
2828
2829 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2830 *outbuf = bp->gunzip_buf;
2831
2832 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2833 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2834 bp->dev->name, bp->strm->msg);
2835
2836 zlib_inflateEnd(bp->strm);
2837
2838 if (rc == Z_STREAM_END)
2839 return 0;
2840
2841 return rc;
2842}
2843
b6016b76
MC
/* Download an RV2P processor firmware image: write each 64-bit
 * instruction through the INSTR_HIGH/INSTR_LOW registers and commit it
 * at the next instruction slot of @rv2p_proc (RV2P_PROC1 or RV2P_PROC2),
 * then put that processor into reset.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each instruction is 8 bytes: high word then low word. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the staged instruction at slot i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2876
af3ee519 2877static int
b6016b76
MC
2878load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2879{
2880 u32 offset;
2881 u32 val;
af3ee519 2882 int rc;
b6016b76
MC
2883
2884 /* Halt the CPU. */
2885 val = REG_RD_IND(bp, cpu_reg->mode);
2886 val |= cpu_reg->mode_value_halt;
2887 REG_WR_IND(bp, cpu_reg->mode, val);
2888 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2889
2890 /* Load the Text area. */
2891 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519
MC
2892 if (fw->gz_text) {
2893 u32 text_len;
2894 void *text;
2895
2896 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2897 &text_len);
2898 if (rc)
2899 return rc;
2900
2901 fw->text = text;
2902 }
2903 if (fw->gz_text) {
b6016b76
MC
2904 int j;
2905
2906 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
fba9fe91 2907 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2908 }
2909 }
2910
2911 /* Load the Data area. */
2912 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2913 if (fw->data) {
2914 int j;
2915
2916 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2917 REG_WR_IND(bp, offset, fw->data[j]);
2918 }
2919 }
2920
2921 /* Load the SBSS area. */
2922 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2923 if (fw->sbss) {
2924 int j;
2925
2926 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2927 REG_WR_IND(bp, offset, fw->sbss[j]);
2928 }
2929 }
2930
2931 /* Load the BSS area. */
2932 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2933 if (fw->bss) {
2934 int j;
2935
2936 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2937 REG_WR_IND(bp, offset, fw->bss[j]);
2938 }
2939 }
2940
2941 /* Load the Read-Only area. */
2942 offset = cpu_reg->spad_base +
2943 (fw->rodata_addr - cpu_reg->mips_view_base);
2944 if (fw->rodata) {
2945 int j;
2946
2947 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2948 REG_WR_IND(bp, offset, fw->rodata[j]);
2949 }
2950 }
2951
2952 /* Clear the pre-fetch instruction. */
2953 REG_WR_IND(bp, cpu_reg->inst, 0);
2954 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2955
2956 /* Start the CPU. */
2957 val = REG_RD_IND(bp, cpu_reg->mode);
2958 val &= ~cpu_reg->mode_value_halt;
2959 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2960 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2961
2962 return 0;
b6016b76
MC
2963}
2964
/* Download and start firmware on all on-chip processors: the two RV2P
 * engines, then RXP, TXP, TPAT, COM and (5709 only) CP.  The 5709 uses
 * the *_09 firmware images, earlier chips the *_06 images.
 *
 * Returns 0 on success or the first error from decompression/loading;
 * the shared gunzip state is always torn down before returning.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3109
/* Move the device between PCI power states.
 *
 * PCI_D0: wake the chip (with the required delay when leaving D3hot)
 * and clear the magic/ACPI packet modes.  PCI_D3hot: optionally arm
 * Wake-on-LAN (force 10/100 autoneg, program the MAC address, accept
 * broadcast/multicast, enable EMAC+RPM), notify the firmware, then drop
 * into D3hot.  Any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for WoL link,
			 * preserving the user's settings across the call.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless WoL is
		 * unsupported on this board).
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WoL is armed. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3236
/* Arbitrate for ownership of the NVRAM interface: assert the SET2 request
 * bit and poll until hardware grants ARB2.  Returns 0 on success, -EBUSY
 * if the grant does not arrive within NVRAM_TIMEOUT_COUNT polls (5 usec
 * apart).  Must be paired with bnx2_release_nvram_lock().
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3258
/* Release NVRAM interface ownership taken by bnx2_acquire_nvram_lock():
 * set the CLR2 bit and poll until hardware deasserts ARB2.  Returns 0 on
 * success or -EBUSY if the arbiter does not release within the timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3281
3282
/* Lift NVRAM write protection.  The PCI-level write-enable bit is always
 * set; for unbuffered (raw) flash an explicit WREN command must also be
 * issued and polled to completion.  Returns 0, or -EBUSY on WREN timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* Clear DONE, then issue the WREN command and wait for
		 * the controller to report completion. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
3311
/* Re-arm NVRAM write protection by clearing the write-enable bit in
 * MISC_CFG.  Counterpart of bnx2_enable_nvram_write().
 */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
3320
3321
/* Enable host access to the flash interface.  Both the read and write
 * enable bits are set even for read-only use (see comment below).
 */
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}
3332
/* Disable host access to the flash interface.  Counterpart of
 * bnx2_enable_nvram_access(); clears both enable bits.
 */
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
3344
/* Erase the flash page containing 'offset'.  Buffered flash does its own
 * erase-on-write, so this is a no-op there.  Caller must already hold the
 * NVRAM lock with access and write enabled.  Returns 0 or -EBUSY on
 * command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3384
/* Read one 32-bit word from NVRAM at 'offset' into ret_val (4 bytes,
 * big-endian data converted to host order).  cmd_flags carries the
 * FIRST/LAST sequencing bits for multi-word transfers.  Caller must hold
 * the NVRAM lock with access enabled.  Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts address
	 * by (page number << page_bits) + byte-within-page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3430
3431
/* Write one 32-bit word (4 bytes from 'val', converted to big-endian) to
 * NVRAM at 'offset'.  cmd_flags carries FIRST/LAST sequencing bits.
 * Caller must hold the NVRAM lock with access and write enabled.
 * Returns 0 or -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash (page-based addressing). */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3475
/* Probe the attached flash/EEPROM part and record it in bp->flash_info,
 * reconfiguring the NVM interface registers if firmware has not already
 * done so.  Also determines bp->flash_size from shared-memory config,
 * falling back to the table's total_size.  Returns 0 on success,
 * -ENODEV if no flash_table entry matches, or an acquire-lock error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {
		/* Flash interface has been reconfigured (by boot code);
		 * match on the backup strap bits only. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size advertised in shared memory; fall back to
	 * the size from the matched table entry if it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3553
3554static int
3555bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3556 int buf_size)
3557{
3558 int rc = 0;
3559 u32 cmd_flags, offset32, len32, extra;
3560
3561 if (buf_size == 0)
3562 return 0;
3563
3564 /* Request access to the flash interface. */
3565 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3566 return rc;
3567
3568 /* Enable access to flash interface */
3569 bnx2_enable_nvram_access(bp);
3570
3571 len32 = buf_size;
3572 offset32 = offset;
3573 extra = 0;
3574
3575 cmd_flags = 0;
3576
3577 if (offset32 & 3) {
3578 u8 buf[4];
3579 u32 pre_len;
3580
3581 offset32 &= ~3;
3582 pre_len = 4 - (offset & 3);
3583
3584 if (pre_len >= len32) {
3585 pre_len = len32;
3586 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3587 BNX2_NVM_COMMAND_LAST;
3588 }
3589 else {
3590 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3591 }
3592
3593 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3594
3595 if (rc)
3596 return rc;
3597
3598 memcpy(ret_buf, buf + (offset & 3), pre_len);
3599
3600 offset32 += 4;
3601 ret_buf += pre_len;
3602 len32 -= pre_len;
3603 }
3604 if (len32 & 3) {
3605 extra = 4 - (len32 & 3);
3606 len32 = (len32 + 4) & ~3;
3607 }
3608
3609 if (len32 == 4) {
3610 u8 buf[4];
3611
3612 if (cmd_flags)
3613 cmd_flags = BNX2_NVM_COMMAND_LAST;
3614 else
3615 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3616 BNX2_NVM_COMMAND_LAST;
3617
3618 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3619
3620 memcpy(ret_buf, buf, 4 - extra);
3621 }
3622 else if (len32 > 0) {
3623 u8 buf[4];
3624
3625 /* Read the first word. */
3626 if (cmd_flags)
3627 cmd_flags = 0;
3628 else
3629 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3630
3631 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3632
3633 /* Advance to the next dword. */
3634 offset32 += 4;
3635 ret_buf += 4;
3636 len32 -= 4;
3637
3638 while (len32 > 4 && rc == 0) {
3639 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3640
3641 /* Advance to the next dword. */
3642 offset32 += 4;
3643 ret_buf += 4;
3644 len32 -= 4;
3645 }
3646
3647 if (rc)
3648 return rc;
3649
3650 cmd_flags = BNX2_NVM_COMMAND_LAST;
3651 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3652
3653 memcpy(ret_buf, buf, 4 - extra);
3654 }
3655
3656 /* Disable access to flash interface */
3657 bnx2_disable_nvram_access(bp);
3658
3659 bnx2_release_nvram_lock(bp);
3660
3661 return rc;
3662}
3663
/* Write buf_size bytes from data_buf to NVRAM at 'offset'.  Unaligned
 * head/tail bytes are merged with existing flash contents via a bounce
 * buffer (align_buf).  For unbuffered flash each affected page is read
 * into flash_buffer, erased, and rewritten with the untouched portions
 * restored.  The NVRAM lock is taken and released once per page.
 * Returns 0 or a negative errno.
 *
 * NOTE(review): error jumps to nvram_write_end taken after
 * bnx2_acquire_nvram_lock() succeeds leave flash access enabled and the
 * arbitration lock held — looks like a leak on failure paths; confirm
 * against later upstream versions before relying on error recovery here.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: widen the range to a dword boundary and
		 * fetch the existing leading bytes. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: widen and fetch the existing trailing
		 * bytes. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge caller data with the preserved head/tail bytes. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		/* Scratch buffer for one full page (264 bytes covers the
		 * largest supported unbuffered page). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3843
0d8a6571
MC
/* Detect firmware-managed ("remote") PHY support on SerDes devices.  If
 * the firmware capability mailbox carries the signature and the remote-PHY
 * bit, acknowledge it (only while the interface is up), set
 * REMOTE_PHY_CAP_FLAG, and record the current port type (fibre vs TP)
 * from the firmware link status.  Caller holds bp->phy_lock (see
 * bnx2_reset_chip).
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Capability word must carry the firmware signature to be valid. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		if (netif_running(bp->dev)) {
			/* Tell firmware the driver accepts remote PHY mode. */
			val = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}
3873
b6016b76
MC
/* Soft-reset the chip core.  Sequence: quiesce DMA, handshake with
 * firmware (WAIT0), deposit a soft-reset signature, reset (5709 uses the
 * MISC_COMMAND register, older chips the PCICFG register), verify endian
 * configuration, wait for firmware init (WAIT1), then re-probe remote-PHY
 * state.  'reset_code' is the BNX2_DRV_MSG_CODE_* reason passed to
 * firmware.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: dedicated software-reset command; read back to
		 * flush the posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* 5706 A0/A1 need an extra ~20ms sleep after reset before
		 * the chip can be touched again. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_remote_phy(bp);
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3971
/* Program the chip after a reset: DMA/byte-swap configuration, context
 * memory, on-chip CPU firmware, NVRAM probe, MAC address, MQ/RV2P/TBDR
 * setup, MTU, host-coalescing parameters, and the receive filter; finally
 * signal the firmware (WAIT2|RESET) and enable the remaining blocks.
 * Returns 0 or a negative errno from context/CPU init or fw_sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Turn off PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 erratum workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds and timers (low 16 bits = normal,
	 * high 16 bits = during-interrupt values). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	/* Read back to flush the posted write before the settle delay. */
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4153
59b47d8a
MC
/* Program the L2 TX context for connection 'cid': context type, command
 * type (with a BD pre-read count of 8), and the TX BD chain base address.
 * The 5709 (Xinan) uses a different context layout, selected via the
 * *_XI offsets.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* TX BD chain base address, split into high and low dwords. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
b6016b76
MC
4182
/* Initialize the TX descriptor ring: point the last BD back at the start
 * of the chain, reset the software/hardware producer and consumer
 * indices, cache the mailbox doorbell addresses, and program the TX
 * context for TX_CID.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last BD is a chain pointer back to the first. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
4207
/* Initialize the RX descriptor ring(s): size the receive buffers from the
 * current MTU, chain the per-page BD rings into a circle, program the RX
 * context, pre-fill the ring with receive skbs, and publish the initial
 * producer index/sequence to the hardware mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page chains to the next page (wrapping
		 * from the last page back to page 0). */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* RX BD chain base address (first page). */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring; stop early if skb allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4267
13daffa2
MC
4268static void
4269bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4270{
4271 u32 num_rings, max;
4272
4273 bp->rx_ring_size = size;
4274 num_rings = 1;
4275 while (size > MAX_RX_DESC_CNT) {
4276 size -= MAX_RX_DESC_CNT;
4277 num_rings++;
4278 }
4279 /* round to next power of 2 */
4280 max = MAX_RX_RINGS;
4281 while ((max & num_rings) == 0)
4282 max >>= 1;
4283
4284 if (num_rings != max)
4285 max <<= 1;
4286
4287 bp->rx_max_ring = max;
4288 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4289}
4290
b6016b76
MC
/* Release every sk_buff still held on the TX ring: unmap the head DMA
 * mapping and each fragment page, then free the skb.  A packet occupies
 * 1 + nr_frags consecutive BD slots, hence the i += j + 1 stride.
 * Safe to call when the ring was never allocated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Fragments occupy the BD slots following the head. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				       pci_unmap_addr(tx_buf, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}

}
4327
/* Release every sk_buff still held on the RX ring: unmap its DMA buffer
 * and free the skb.  Safe to call when the ring was never allocated.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Slots past the fill level are simply empty. */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}
4351
/* Free all socket buffers held on both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4358
/* Reset the chip and bring the data path back up: free all queued skbs
 * (even if the reset itself failed), re-program the chip, then rebuild
 * the TX and RX rings.  'reset_code' is forwarded to the firmware
 * handshake.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Always reclaim skbs, even on reset failure. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}
4376
/* Full NIC (re)initialization: reset with the standard RESET code, then
 * bring up the PHY and link state under phy_lock.  Returns 0 or a
 * negative errno from the reset path.
 */
static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	bnx2_set_link(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
4391
4392static int
4393bnx2_test_registers(struct bnx2 *bp)
4394{
4395 int ret;
5bae30c9 4396 int i, is_5709;
f71e1309 4397 static const struct {
b6016b76
MC
4398 u16 offset;
4399 u16 flags;
5bae30c9 4400#define BNX2_FL_NOT_5709 1
b6016b76
MC
4401 u32 rw_mask;
4402 u32 ro_mask;
4403 } reg_tbl[] = {
4404 { 0x006c, 0, 0x00000000, 0x0000003f },
4405 { 0x0090, 0, 0xffffffff, 0x00000000 },
4406 { 0x0094, 0, 0x00000000, 0x00000000 },
4407
5bae30c9
MC
4408 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4409 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4410 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4411 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4412 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4413 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4414 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4415 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4416 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4417
4418 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4419 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4420 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4421 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4422 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4423 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4424
4425 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4426 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4427 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4428
4429 { 0x1000, 0, 0x00000000, 0x00000001 },
4430 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4431
4432 { 0x1408, 0, 0x01c00800, 0x00000000 },
4433 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4434 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4435 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4436 { 0x14b0, 0, 0x00000002, 0x00000001 },
4437 { 0x14b8, 0, 0x00000000, 0x00000000 },
4438 { 0x14c0, 0, 0x00000000, 0x00000009 },
4439 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4440 { 0x14cc, 0, 0x00000000, 0x00000001 },
4441 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4442
4443 { 0x1800, 0, 0x00000000, 0x00000001 },
4444 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4445
4446 { 0x2800, 0, 0x00000000, 0x00000001 },
4447 { 0x2804, 0, 0x00000000, 0x00003f01 },
4448 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4449 { 0x2810, 0, 0xffff0000, 0x00000000 },
4450 { 0x2814, 0, 0xffff0000, 0x00000000 },
4451 { 0x2818, 0, 0xffff0000, 0x00000000 },
4452 { 0x281c, 0, 0xffff0000, 0x00000000 },
4453 { 0x2834, 0, 0xffffffff, 0x00000000 },
4454 { 0x2840, 0, 0x00000000, 0xffffffff },
4455 { 0x2844, 0, 0x00000000, 0xffffffff },
4456 { 0x2848, 0, 0xffffffff, 0x00000000 },
4457 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4458
4459 { 0x2c00, 0, 0x00000000, 0x00000011 },
4460 { 0x2c04, 0, 0x00000000, 0x00030007 },
4461
b6016b76
MC
4462 { 0x3c00, 0, 0x00000000, 0x00000001 },
4463 { 0x3c04, 0, 0x00000000, 0x00070000 },
4464 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4465 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4466 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4467 { 0x3c14, 0, 0x00000000, 0xffffffff },
4468 { 0x3c18, 0, 0x00000000, 0xffffffff },
4469 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4470 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4471
4472 { 0x5004, 0, 0x00000000, 0x0000007f },
4473 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4474
b6016b76
MC
4475 { 0x5c00, 0, 0x00000000, 0x00000001 },
4476 { 0x5c04, 0, 0x00000000, 0x0003000f },
4477 { 0x5c08, 0, 0x00000003, 0x00000000 },
4478 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4479 { 0x5c10, 0, 0x00000000, 0xffffffff },
4480 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4481 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4482 { 0x5c88, 0, 0x00000000, 0x00077373 },
4483 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4484
4485 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4486 { 0x680c, 0, 0xffffffff, 0x00000000 },
4487 { 0x6810, 0, 0xffffffff, 0x00000000 },
4488 { 0x6814, 0, 0xffffffff, 0x00000000 },
4489 { 0x6818, 0, 0xffffffff, 0x00000000 },
4490 { 0x681c, 0, 0xffffffff, 0x00000000 },
4491 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4492 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4493 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4494 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4495 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4496 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4497 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4498 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4499 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4500 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4501 { 0x684c, 0, 0xffffffff, 0x00000000 },
4502 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4503 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4504 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4505 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4506 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4507 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4508
4509 { 0xffff, 0, 0x00000000, 0x00000000 },
4510 };
4511
4512 ret = 0;
5bae30c9
MC
4513 is_5709 = 0;
4514 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4515 is_5709 = 1;
4516
b6016b76
MC
4517 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4518 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4519 u16 flags = reg_tbl[i].flags;
4520
4521 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4522 continue;
b6016b76
MC
4523
4524 offset = (u32) reg_tbl[i].offset;
4525 rw_mask = reg_tbl[i].rw_mask;
4526 ro_mask = reg_tbl[i].ro_mask;
4527
14ab9b86 4528 save_val = readl(bp->regview + offset);
b6016b76 4529
14ab9b86 4530 writel(0, bp->regview + offset);
b6016b76 4531
14ab9b86 4532 val = readl(bp->regview + offset);
b6016b76
MC
4533 if ((val & rw_mask) != 0) {
4534 goto reg_test_err;
4535 }
4536
4537 if ((val & ro_mask) != (save_val & ro_mask)) {
4538 goto reg_test_err;
4539 }
4540
14ab9b86 4541 writel(0xffffffff, bp->regview + offset);
b6016b76 4542
14ab9b86 4543 val = readl(bp->regview + offset);
b6016b76
MC
4544 if ((val & rw_mask) != rw_mask) {
4545 goto reg_test_err;
4546 }
4547
4548 if ((val & ro_mask) != (save_val & ro_mask)) {
4549 goto reg_test_err;
4550 }
4551
14ab9b86 4552 writel(save_val, bp->regview + offset);
b6016b76
MC
4553 continue;
4554
4555reg_test_err:
14ab9b86 4556 writel(save_val, bp->regview + offset);
b6016b76
MC
4557 ret = -ENODEV;
4558 break;
4559 }
4560 return ret;
4561}
4562
4563static int
4564bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4565{
f71e1309 4566 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4567 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4568 int i;
4569
4570 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4571 u32 offset;
4572
4573 for (offset = 0; offset < size; offset += 4) {
4574
4575 REG_WR_IND(bp, start + offset, test_pattern[i]);
4576
4577 if (REG_RD_IND(bp, start + offset) !=
4578 test_pattern[i]) {
4579 return -ENODEV;
4580 }
4581 }
4582 }
4583 return 0;
4584}
4585
4586static int
4587bnx2_test_memory(struct bnx2 *bp)
4588{
4589 int ret = 0;
4590 int i;
5bae30c9 4591 static struct mem_entry {
b6016b76
MC
4592 u32 offset;
4593 u32 len;
5bae30c9 4594 } mem_tbl_5706[] = {
b6016b76 4595 { 0x60000, 0x4000 },
5b0c76ad 4596 { 0xa0000, 0x3000 },
b6016b76
MC
4597 { 0xe0000, 0x4000 },
4598 { 0x120000, 0x4000 },
4599 { 0x1a0000, 0x4000 },
4600 { 0x160000, 0x4000 },
4601 { 0xffffffff, 0 },
5bae30c9
MC
4602 },
4603 mem_tbl_5709[] = {
4604 { 0x60000, 0x4000 },
4605 { 0xa0000, 0x3000 },
4606 { 0xe0000, 0x4000 },
4607 { 0x120000, 0x4000 },
4608 { 0x1a0000, 0x4000 },
4609 { 0xffffffff, 0 },
b6016b76 4610 };
5bae30c9
MC
4611 struct mem_entry *mem_tbl;
4612
4613 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4614 mem_tbl = mem_tbl_5709;
4615 else
4616 mem_tbl = mem_tbl_5706;
b6016b76
MC
4617
4618 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4619 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4620 mem_tbl[i].len)) != 0) {
4621 return ret;
4622 }
4623 }
6aa20a22 4624
b6016b76
MC
4625 return ret;
4626}
4627
bc5a0690
MC
4628#define BNX2_MAC_LOOPBACK 0
4629#define BNX2_PHY_LOOPBACK 1
4630
b6016b76 4631static int
bc5a0690 4632bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4633{
4634 unsigned int pkt_size, num_pkts, i;
4635 struct sk_buff *skb, *rx_skb;
4636 unsigned char *packet;
bc5a0690 4637 u16 rx_start_idx, rx_idx;
b6016b76
MC
4638 dma_addr_t map;
4639 struct tx_bd *txbd;
4640 struct sw_bd *rx_buf;
4641 struct l2_fhdr *rx_hdr;
4642 int ret = -ENODEV;
4643
bc5a0690
MC
4644 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4645 bp->loopback = MAC_LOOPBACK;
4646 bnx2_set_mac_loopback(bp);
4647 }
4648 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
80be4434 4649 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4650 bnx2_set_phy_loopback(bp);
4651 }
4652 else
4653 return -EINVAL;
b6016b76
MC
4654
4655 pkt_size = 1514;
932f3772 4656 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4657 if (!skb)
4658 return -ENOMEM;
b6016b76 4659 packet = skb_put(skb, pkt_size);
6634292b 4660 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4661 memset(packet + 6, 0x0, 8);
4662 for (i = 14; i < pkt_size; i++)
4663 packet[i] = (unsigned char) (i & 0xff);
4664
4665 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4666 PCI_DMA_TODEVICE);
4667
bf5295bb
MC
4668 REG_WR(bp, BNX2_HC_COMMAND,
4669 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4670
b6016b76
MC
4671 REG_RD(bp, BNX2_HC_COMMAND);
4672
4673 udelay(5);
4674 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4675
b6016b76
MC
4676 num_pkts = 0;
4677
bc5a0690 4678 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4679
4680 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4681 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4682 txbd->tx_bd_mss_nbytes = pkt_size;
4683 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4684
4685 num_pkts++;
bc5a0690
MC
4686 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4687 bp->tx_prod_bseq += pkt_size;
b6016b76 4688
234754d5
MC
4689 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4690 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4691
4692 udelay(100);
4693
bf5295bb
MC
4694 REG_WR(bp, BNX2_HC_COMMAND,
4695 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4696
b6016b76
MC
4697 REG_RD(bp, BNX2_HC_COMMAND);
4698
4699 udelay(5);
4700
4701 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4702 dev_kfree_skb(skb);
b6016b76 4703
bc5a0690 4704 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
4705 goto loopback_test_done;
4706 }
4707
4708 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4709 if (rx_idx != rx_start_idx + num_pkts) {
4710 goto loopback_test_done;
4711 }
4712
4713 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4714 rx_skb = rx_buf->skb;
4715
4716 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4717 skb_reserve(rx_skb, bp->rx_offset);
4718
4719 pci_dma_sync_single_for_cpu(bp->pdev,
4720 pci_unmap_addr(rx_buf, mapping),
4721 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4722
ade2bfe7 4723 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4724 (L2_FHDR_ERRORS_BAD_CRC |
4725 L2_FHDR_ERRORS_PHY_DECODE |
4726 L2_FHDR_ERRORS_ALIGNMENT |
4727 L2_FHDR_ERRORS_TOO_SHORT |
4728 L2_FHDR_ERRORS_GIANT_FRAME)) {
4729
4730 goto loopback_test_done;
4731 }
4732
4733 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4734 goto loopback_test_done;
4735 }
4736
4737 for (i = 14; i < pkt_size; i++) {
4738 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4739 goto loopback_test_done;
4740 }
4741 }
4742
4743 ret = 0;
4744
4745loopback_test_done:
4746 bp->loopback = 0;
4747 return ret;
4748}
4749
bc5a0690
MC
4750#define BNX2_MAC_LOOPBACK_FAILED 1
4751#define BNX2_PHY_LOOPBACK_FAILED 2
4752#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4753 BNX2_PHY_LOOPBACK_FAILED)
4754
4755static int
4756bnx2_test_loopback(struct bnx2 *bp)
4757{
4758 int rc = 0;
4759
4760 if (!netif_running(bp->dev))
4761 return BNX2_LOOPBACK_FAILED;
4762
4763 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4764 spin_lock_bh(&bp->phy_lock);
4765 bnx2_init_phy(bp);
4766 spin_unlock_bh(&bp->phy_lock);
4767 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4768 rc |= BNX2_MAC_LOOPBACK_FAILED;
4769 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4770 rc |= BNX2_PHY_LOOPBACK_FAILED;
4771 return rc;
4772}
4773
b6016b76
MC
4774#define NVRAM_SIZE 0x200
4775#define CRC32_RESIDUAL 0xdebb20e3
4776
4777static int
4778bnx2_test_nvram(struct bnx2 *bp)
4779{
4780 u32 buf[NVRAM_SIZE / 4];
4781 u8 *data = (u8 *) buf;
4782 int rc = 0;
4783 u32 magic, csum;
4784
4785 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4786 goto test_nvram_done;
4787
4788 magic = be32_to_cpu(buf[0]);
4789 if (magic != 0x669955aa) {
4790 rc = -ENODEV;
4791 goto test_nvram_done;
4792 }
4793
4794 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4795 goto test_nvram_done;
4796
4797 csum = ether_crc_le(0x100, data);
4798 if (csum != CRC32_RESIDUAL) {
4799 rc = -ENODEV;
4800 goto test_nvram_done;
4801 }
4802
4803 csum = ether_crc_le(0x100, data + 0x100);
4804 if (csum != CRC32_RESIDUAL) {
4805 rc = -ENODEV;
4806 }
4807
4808test_nvram_done:
4809 return rc;
4810}
4811
4812static int
4813bnx2_test_link(struct bnx2 *bp)
4814{
4815 u32 bmsr;
4816
c770a65c 4817 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
4818 bnx2_enable_bmsr1(bp);
4819 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4820 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4821 bnx2_disable_bmsr1(bp);
c770a65c 4822 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4823
b6016b76
MC
4824 if (bmsr & BMSR_LSTATUS) {
4825 return 0;
4826 }
4827 return -ENODEV;
4828}
4829
4830static int
4831bnx2_test_intr(struct bnx2 *bp)
4832{
4833 int i;
b6016b76
MC
4834 u16 status_idx;
4835
4836 if (!netif_running(bp->dev))
4837 return -ENODEV;
4838
4839 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4840
4841 /* This register is not touched during run-time. */
bf5295bb 4842 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4843 REG_RD(bp, BNX2_HC_COMMAND);
4844
4845 for (i = 0; i < 10; i++) {
4846 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4847 status_idx) {
4848
4849 break;
4850 }
4851
4852 msleep_interruptible(10);
4853 }
4854 if (i < 10)
4855 return 0;
4856
4857 return -ENODEV;
4858}
4859
4860static void
48b01e2d 4861bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 4862{
48b01e2d
MC
4863 spin_lock(&bp->phy_lock);
4864 if (bp->serdes_an_pending)
4865 bp->serdes_an_pending--;
4866 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4867 u32 bmcr;
b6016b76 4868
48b01e2d 4869 bp->current_interval = bp->timer_interval;
cd339a0e 4870
ca58c3af 4871 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 4872
48b01e2d
MC
4873 if (bmcr & BMCR_ANENABLE) {
4874 u32 phy1, phy2;
b6016b76 4875
48b01e2d
MC
4876 bnx2_write_phy(bp, 0x1c, 0x7c00);
4877 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 4878
48b01e2d
MC
4879 bnx2_write_phy(bp, 0x17, 0x0f01);
4880 bnx2_read_phy(bp, 0x15, &phy2);
4881 bnx2_write_phy(bp, 0x17, 0x0f01);
4882 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 4883
48b01e2d
MC
4884 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4885 !(phy2 & 0x20)) { /* no CONFIG */
4886
4887 bmcr &= ~BMCR_ANENABLE;
4888 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 4889 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
4890 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4891 }
b6016b76 4892 }
48b01e2d
MC
4893 }
4894 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4895 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4896 u32 phy2;
b6016b76 4897
48b01e2d
MC
4898 bnx2_write_phy(bp, 0x17, 0x0f01);
4899 bnx2_read_phy(bp, 0x15, &phy2);
4900 if (phy2 & 0x20) {
4901 u32 bmcr;
cd339a0e 4902
ca58c3af 4903 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 4904 bmcr |= BMCR_ANENABLE;
ca58c3af 4905 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 4906
48b01e2d
MC
4907 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4908 }
4909 } else
4910 bp->current_interval = bp->timer_interval;
b6016b76 4911
48b01e2d
MC
4912 spin_unlock(&bp->phy_lock);
4913}
b6016b76 4914
f8dd064e
MC
4915static void
4916bnx2_5708_serdes_timer(struct bnx2 *bp)
4917{
0d8a6571
MC
4918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4919 return;
4920
f8dd064e
MC
4921 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4922 bp->serdes_an_pending = 0;
4923 return;
4924 }
b6016b76 4925
f8dd064e
MC
4926 spin_lock(&bp->phy_lock);
4927 if (bp->serdes_an_pending)
4928 bp->serdes_an_pending--;
4929 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4930 u32 bmcr;
b6016b76 4931
ca58c3af 4932 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 4933 if (bmcr & BMCR_ANENABLE) {
605a9e20 4934 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
4935 bp->current_interval = SERDES_FORCED_TIMEOUT;
4936 } else {
605a9e20 4937 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
4938 bp->serdes_an_pending = 2;
4939 bp->current_interval = bp->timer_interval;
b6016b76 4940 }
b6016b76 4941
f8dd064e
MC
4942 } else
4943 bp->current_interval = bp->timer_interval;
b6016b76 4944
f8dd064e
MC
4945 spin_unlock(&bp->phy_lock);
4946}
4947
48b01e2d
MC
4948static void
4949bnx2_timer(unsigned long data)
4950{
4951 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 4952
48b01e2d
MC
4953 if (!netif_running(bp->dev))
4954 return;
b6016b76 4955
48b01e2d
MC
4956 if (atomic_read(&bp->intr_sem) != 0)
4957 goto bnx2_restart_timer;
b6016b76 4958
df149d70 4959 bnx2_send_heart_beat(bp);
b6016b76 4960
48b01e2d 4961 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 4962
02537b06
MC
4963 /* workaround occasional corrupted counters */
4964 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4965 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4966 BNX2_HC_COMMAND_STATS_NOW);
4967
f8dd064e
MC
4968 if (bp->phy_flags & PHY_SERDES_FLAG) {
4969 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4970 bnx2_5706_serdes_timer(bp);
27a005b8 4971 else
f8dd064e 4972 bnx2_5708_serdes_timer(bp);
b6016b76
MC
4973 }
4974
4975bnx2_restart_timer:
cd339a0e 4976 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4977}
4978
8e6a72c4
MC
4979static int
4980bnx2_request_irq(struct bnx2 *bp)
4981{
4982 struct net_device *dev = bp->dev;
4983 int rc = 0;
4984
4985 if (bp->flags & USING_MSI_FLAG) {
4986 irq_handler_t fn = bnx2_msi;
4987
4988 if (bp->flags & ONE_SHOT_MSI_FLAG)
4989 fn = bnx2_msi_1shot;
4990
4991 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4992 } else
4993 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4994 IRQF_SHARED, dev->name, dev);
4995 return rc;
4996}
4997
4998static void
4999bnx2_free_irq(struct bnx2 *bp)
5000{
5001 struct net_device *dev = bp->dev;
5002
5003 if (bp->flags & USING_MSI_FLAG) {
5004 free_irq(bp->pdev->irq, dev);
5005 pci_disable_msi(bp->pdev);
5006 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5007 } else
5008 free_irq(bp->pdev->irq, dev);
5009}
5010
b6016b76
MC
5011/* Called with rtnl_lock */
5012static int
5013bnx2_open(struct net_device *dev)
5014{
972ec0d4 5015 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5016 int rc;
5017
1b2f922f
MC
5018 netif_carrier_off(dev);
5019
829ca9a3 5020 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5021 bnx2_disable_int(bp);
5022
5023 rc = bnx2_alloc_mem(bp);
5024 if (rc)
5025 return rc;
5026
8e6a72c4 5027 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
b6016b76
MC
5028 if (pci_enable_msi(bp->pdev) == 0) {
5029 bp->flags |= USING_MSI_FLAG;
8e6a72c4
MC
5030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5031 bp->flags |= ONE_SHOT_MSI_FLAG;
b6016b76 5032 }
b6016b76 5033 }
8e6a72c4
MC
5034 rc = bnx2_request_irq(bp);
5035
b6016b76
MC
5036 if (rc) {
5037 bnx2_free_mem(bp);
5038 return rc;
5039 }
5040
5041 rc = bnx2_init_nic(bp);
5042
5043 if (rc) {
8e6a72c4 5044 bnx2_free_irq(bp);
b6016b76
MC
5045 bnx2_free_skbs(bp);
5046 bnx2_free_mem(bp);
5047 return rc;
5048 }
6aa20a22 5049
cd339a0e 5050 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5051
5052 atomic_set(&bp->intr_sem, 0);
5053
5054 bnx2_enable_int(bp);
5055
5056 if (bp->flags & USING_MSI_FLAG) {
5057 /* Test MSI to make sure it is working
5058 * If MSI test fails, go back to INTx mode
5059 */
5060 if (bnx2_test_intr(bp) != 0) {
5061 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5062 " using MSI, switching to INTx mode. Please"
5063 " report this failure to the PCI maintainer"
5064 " and include system chipset information.\n",
5065 bp->dev->name);
5066
5067 bnx2_disable_int(bp);
8e6a72c4 5068 bnx2_free_irq(bp);
b6016b76
MC
5069
5070 rc = bnx2_init_nic(bp);
5071
8e6a72c4
MC
5072 if (!rc)
5073 rc = bnx2_request_irq(bp);
5074
b6016b76
MC
5075 if (rc) {
5076 bnx2_free_skbs(bp);
5077 bnx2_free_mem(bp);
5078 del_timer_sync(&bp->timer);
5079 return rc;
5080 }
5081 bnx2_enable_int(bp);
5082 }
5083 }
5084 if (bp->flags & USING_MSI_FLAG) {
5085 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5086 }
5087
5088 netif_start_queue(dev);
5089
5090 return 0;
5091}
5092
5093static void
c4028958 5094bnx2_reset_task(struct work_struct *work)
b6016b76 5095{
c4028958 5096 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5097
afdc08b9
MC
5098 if (!netif_running(bp->dev))
5099 return;
5100
5101 bp->in_reset_task = 1;
b6016b76
MC
5102 bnx2_netif_stop(bp);
5103
5104 bnx2_init_nic(bp);
5105
5106 atomic_set(&bp->intr_sem, 1);
5107 bnx2_netif_start(bp);
afdc08b9 5108 bp->in_reset_task = 0;
b6016b76
MC
5109}
5110
5111static void
5112bnx2_tx_timeout(struct net_device *dev)
5113{
972ec0d4 5114 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5115
5116 /* This allows the netif to be shutdown gracefully before resetting */
5117 schedule_work(&bp->reset_task);
5118}
5119
5120#ifdef BCM_VLAN
5121/* Called with rtnl_lock */
5122static void
5123bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5124{
972ec0d4 5125 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5126
5127 bnx2_netif_stop(bp);
5128
5129 bp->vlgrp = vlgrp;
5130 bnx2_set_rx_mode(dev);
5131
5132 bnx2_netif_start(bp);
5133}
b6016b76
MC
5134#endif
5135
932ff279 5136/* Called with netif_tx_lock.
2f8af120
MC
5137 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5138 * netif_wake_queue().
b6016b76
MC
5139 */
5140static int
5141bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5142{
972ec0d4 5143 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5144 dma_addr_t mapping;
5145 struct tx_bd *txbd;
5146 struct sw_bd *tx_buf;
5147 u32 len, vlan_tag_flags, last_frag, mss;
5148 u16 prod, ring_prod;
5149 int i;
5150
e89bbf10 5151 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
5152 netif_stop_queue(dev);
5153 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5154 dev->name);
5155
5156 return NETDEV_TX_BUSY;
5157 }
5158 len = skb_headlen(skb);
5159 prod = bp->tx_prod;
5160 ring_prod = TX_RING_IDX(prod);
5161
5162 vlan_tag_flags = 0;
84fa7933 5163 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
5164 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5165 }
5166
5167 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5168 vlan_tag_flags |=
5169 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5170 }
fde82055 5171 if ((mss = skb_shinfo(skb)->gso_size)) {
b6016b76 5172 u32 tcp_opt_len, ip_tcp_len;
eddc9ec5 5173 struct iphdr *iph;
b6016b76 5174
b6016b76
MC
5175 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5176
4666f87a
MC
5177 tcp_opt_len = tcp_optlen(skb);
5178
5179 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5180 u32 tcp_off = skb_transport_offset(skb) -
5181 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 5182
4666f87a
MC
5183 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5184 TX_BD_FLAGS_SW_FLAGS;
5185 if (likely(tcp_off == 0))
5186 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5187 else {
5188 tcp_off >>= 3;
5189 vlan_tag_flags |= ((tcp_off & 0x3) <<
5190 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5191 ((tcp_off & 0x10) <<
5192 TX_BD_FLAGS_TCP6_OFF4_SHL);
5193 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5194 }
5195 } else {
5196 if (skb_header_cloned(skb) &&
5197 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5198 dev_kfree_skb(skb);
5199 return NETDEV_TX_OK;
5200 }
b6016b76 5201
4666f87a
MC
5202 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5203
5204 iph = ip_hdr(skb);
5205 iph->check = 0;
5206 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5207 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5208 iph->daddr, 0,
5209 IPPROTO_TCP,
5210 0);
5211 if (tcp_opt_len || (iph->ihl > 5)) {
5212 vlan_tag_flags |= ((iph->ihl - 5) +
5213 (tcp_opt_len >> 2)) << 8;
5214 }
b6016b76 5215 }
4666f87a 5216 } else
b6016b76 5217 mss = 0;
b6016b76
MC
5218
5219 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 5220
b6016b76
MC
5221 tx_buf = &bp->tx_buf_ring[ring_prod];
5222 tx_buf->skb = skb;
5223 pci_unmap_addr_set(tx_buf, mapping, mapping);
5224
5225 txbd = &bp->tx_desc_ring[ring_prod];
5226
5227 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5228 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5229 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5230 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5231
5232 last_frag = skb_shinfo(skb)->nr_frags;
5233
5234 for (i = 0; i < last_frag; i++) {
5235 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5236
5237 prod = NEXT_TX_BD(prod);
5238 ring_prod = TX_RING_IDX(prod);
5239 txbd = &bp->tx_desc_ring[ring_prod];
5240
5241 len = frag->size;
5242 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5243 len, PCI_DMA_TODEVICE);
5244 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5245 mapping, mapping);
5246
5247 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5248 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5249 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5250 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5251
5252 }
5253 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5254
5255 prod = NEXT_TX_BD(prod);
5256 bp->tx_prod_bseq += skb->len;
5257
234754d5
MC
5258 REG_WR16(bp, bp->tx_bidx_addr, prod);
5259 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
5260
5261 mmiowb();
5262
5263 bp->tx_prod = prod;
5264 dev->trans_start = jiffies;
5265
e89bbf10 5266 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 5267 netif_stop_queue(dev);
2f8af120 5268 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 5269 netif_wake_queue(dev);
b6016b76
MC
5270 }
5271
5272 return NETDEV_TX_OK;
5273}
5274
5275/* Called with rtnl_lock */
5276static int
5277bnx2_close(struct net_device *dev)
5278{
972ec0d4 5279 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5280 u32 reset_code;
5281
afdc08b9
MC
5282 /* Calling flush_scheduled_work() may deadlock because
5283 * linkwatch_event() may be on the workqueue and it will try to get
5284 * the rtnl_lock which we are holding.
5285 */
5286 while (bp->in_reset_task)
5287 msleep(1);
5288
b6016b76
MC
5289 bnx2_netif_stop(bp);
5290 del_timer_sync(&bp->timer);
dda1e390 5291 if (bp->flags & NO_WOL_FLAG)
6c4f095e 5292 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 5293 else if (bp->wol)
b6016b76
MC
5294 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5295 else
5296 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5297 bnx2_reset_chip(bp, reset_code);
8e6a72c4 5298 bnx2_free_irq(bp);
b6016b76
MC
5299 bnx2_free_skbs(bp);
5300 bnx2_free_mem(bp);
5301 bp->link_up = 0;
5302 netif_carrier_off(bp->dev);
829ca9a3 5303 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
5304 return 0;
5305}
5306
5307#define GET_NET_STATS64(ctr) \
5308 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5309 (unsigned long) (ctr##_lo)
5310
5311#define GET_NET_STATS32(ctr) \
5312 (ctr##_lo)
5313
5314#if (BITS_PER_LONG == 64)
5315#define GET_NET_STATS GET_NET_STATS64
5316#else
5317#define GET_NET_STATS GET_NET_STATS32
5318#endif
5319
5320static struct net_device_stats *
5321bnx2_get_stats(struct net_device *dev)
5322{
972ec0d4 5323 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5324 struct statistics_block *stats_blk = bp->stats_blk;
5325 struct net_device_stats *net_stats = &bp->net_stats;
5326
5327 if (bp->stats_blk == NULL) {
5328 return net_stats;
5329 }
5330 net_stats->rx_packets =
5331 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5332 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5333 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5334
5335 net_stats->tx_packets =
5336 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5337 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5338 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5339
5340 net_stats->rx_bytes =
5341 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5342
5343 net_stats->tx_bytes =
5344 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5345
6aa20a22 5346 net_stats->multicast =
b6016b76
MC
5347 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5348
6aa20a22 5349 net_stats->collisions =
b6016b76
MC
5350 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5351
6aa20a22 5352 net_stats->rx_length_errors =
b6016b76
MC
5353 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5354 stats_blk->stat_EtherStatsOverrsizePkts);
5355
6aa20a22 5356 net_stats->rx_over_errors =
b6016b76
MC
5357 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5358
6aa20a22 5359 net_stats->rx_frame_errors =
b6016b76
MC
5360 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5361
6aa20a22 5362 net_stats->rx_crc_errors =
b6016b76
MC
5363 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5364
5365 net_stats->rx_errors = net_stats->rx_length_errors +
5366 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5367 net_stats->rx_crc_errors;
5368
5369 net_stats->tx_aborted_errors =
5370 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5371 stats_blk->stat_Dot3StatsLateCollisions);
5372
5b0c76ad
MC
5373 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5374 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
5375 net_stats->tx_carrier_errors = 0;
5376 else {
5377 net_stats->tx_carrier_errors =
5378 (unsigned long)
5379 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5380 }
5381
5382 net_stats->tx_errors =
6aa20a22 5383 (unsigned long)
b6016b76
MC
5384 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5385 +
5386 net_stats->tx_aborted_errors +
5387 net_stats->tx_carrier_errors;
5388
cea94db9
MC
5389 net_stats->rx_missed_errors =
5390 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5391 stats_blk->stat_FwRxDrop);
5392
b6016b76
MC
5393 return net_stats;
5394}
5395
5396/* All ethtool functions called with rtnl_lock */
5397
5398static int
5399bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5400{
972ec0d4 5401 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 5402 int support_serdes = 0, support_copper = 0;
b6016b76
MC
5403
5404 cmd->supported = SUPPORTED_Autoneg;
7b6b8347
MC
5405 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5406 support_serdes = 1;
5407 support_copper = 1;
5408 } else if (bp->phy_port == PORT_FIBRE)
5409 support_serdes = 1;
5410 else
5411 support_copper = 1;
5412
5413 if (support_serdes) {
b6016b76
MC
5414 cmd->supported |= SUPPORTED_1000baseT_Full |
5415 SUPPORTED_FIBRE;
605a9e20
MC
5416 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5417 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 5418
b6016b76 5419 }
7b6b8347 5420 if (support_copper) {
b6016b76
MC
5421 cmd->supported |= SUPPORTED_10baseT_Half |
5422 SUPPORTED_10baseT_Full |
5423 SUPPORTED_100baseT_Half |
5424 SUPPORTED_100baseT_Full |
5425 SUPPORTED_1000baseT_Full |
5426 SUPPORTED_TP;
5427
b6016b76
MC
5428 }
5429
7b6b8347
MC
5430 spin_lock_bh(&bp->phy_lock);
5431 cmd->port = bp->phy_port;
b6016b76
MC
5432 cmd->advertising = bp->advertising;
5433
5434 if (bp->autoneg & AUTONEG_SPEED) {
5435 cmd->autoneg = AUTONEG_ENABLE;
5436 }
5437 else {
5438 cmd->autoneg = AUTONEG_DISABLE;
5439 }
5440
5441 if (netif_carrier_ok(dev)) {
5442 cmd->speed = bp->line_speed;
5443 cmd->duplex = bp->duplex;
5444 }
5445 else {
5446 cmd->speed = -1;
5447 cmd->duplex = -1;
5448 }
7b6b8347 5449 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5450
5451 cmd->transceiver = XCVR_INTERNAL;
5452 cmd->phy_address = bp->phy_addr;
5453
5454 return 0;
5455}
6aa20a22 5456
b6016b76
MC
5457static int
5458bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5459{
972ec0d4 5460 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5461 u8 autoneg = bp->autoneg;
5462 u8 req_duplex = bp->req_duplex;
5463 u16 req_line_speed = bp->req_line_speed;
5464 u32 advertising = bp->advertising;
7b6b8347
MC
5465 int err = -EINVAL;
5466
5467 spin_lock_bh(&bp->phy_lock);
5468
5469 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5470 goto err_out_unlock;
5471
5472 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5473 goto err_out_unlock;
b6016b76
MC
5474
5475 if (cmd->autoneg == AUTONEG_ENABLE) {
5476 autoneg |= AUTONEG_SPEED;
5477
6aa20a22 5478 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5479
5480 /* allow advertising 1 speed */
5481 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5482 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5483 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5484 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5485
7b6b8347
MC
5486 if (cmd->port == PORT_FIBRE)
5487 goto err_out_unlock;
b6016b76
MC
5488
5489 advertising = cmd->advertising;
5490
27a005b8 5491 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
7b6b8347
MC
5492 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5493 (cmd->port == PORT_TP))
5494 goto err_out_unlock;
5495 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
b6016b76 5496 advertising = cmd->advertising;
7b6b8347
MC
5497 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5498 goto err_out_unlock;
b6016b76 5499 else {
7b6b8347 5500 if (cmd->port == PORT_FIBRE)
b6016b76 5501 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7b6b8347 5502 else
b6016b76 5503 advertising = ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5504 }
5505 advertising |= ADVERTISED_Autoneg;
5506 }
5507 else {
7b6b8347 5508 if (cmd->port == PORT_FIBRE) {
80be4434
MC
5509 if ((cmd->speed != SPEED_1000 &&
5510 cmd->speed != SPEED_2500) ||
5511 (cmd->duplex != DUPLEX_FULL))
7b6b8347 5512 goto err_out_unlock;
80be4434
MC
5513
5514 if (cmd->speed == SPEED_2500 &&
5515 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
7b6b8347 5516 goto err_out_unlock;
b6016b76 5517 }
7b6b8347
MC
5518 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5519 goto err_out_unlock;
5520
b6016b76
MC
5521 autoneg &= ~AUTONEG_SPEED;
5522 req_line_speed = cmd->speed;
5523 req_duplex = cmd->duplex;
5524 advertising = 0;
5525 }
5526
5527 bp->autoneg = autoneg;
5528 bp->advertising = advertising;
5529 bp->req_line_speed = req_line_speed;
5530 bp->req_duplex = req_duplex;
5531
7b6b8347 5532 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 5533
7b6b8347 5534err_out_unlock:
c770a65c 5535 spin_unlock_bh(&bp->phy_lock);
b6016b76 5536
7b6b8347 5537 return err;
b6016b76
MC
5538}
5539
5540static void
5541bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5542{
972ec0d4 5543 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5544
5545 strcpy(info->driver, DRV_MODULE_NAME);
5546 strcpy(info->version, DRV_MODULE_VERSION);
5547 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 5548 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
5549}
5550
244ac4f4
MC
5551#define BNX2_REGDUMP_LEN (32 * 1024)
5552
5553static int
5554bnx2_get_regs_len(struct net_device *dev)
5555{
5556 return BNX2_REGDUMP_LEN;
5557}
5558
5559static void
5560bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5561{
5562 u32 *p = _p, i, offset;
5563 u8 *orig_p = _p;
5564 struct bnx2 *bp = netdev_priv(dev);
5565 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5566 0x0800, 0x0880, 0x0c00, 0x0c10,
5567 0x0c30, 0x0d08, 0x1000, 0x101c,
5568 0x1040, 0x1048, 0x1080, 0x10a4,
5569 0x1400, 0x1490, 0x1498, 0x14f0,
5570 0x1500, 0x155c, 0x1580, 0x15dc,
5571 0x1600, 0x1658, 0x1680, 0x16d8,
5572 0x1800, 0x1820, 0x1840, 0x1854,
5573 0x1880, 0x1894, 0x1900, 0x1984,
5574 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5575 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5576 0x2000, 0x2030, 0x23c0, 0x2400,
5577 0x2800, 0x2820, 0x2830, 0x2850,
5578 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5579 0x3c00, 0x3c94, 0x4000, 0x4010,
5580 0x4080, 0x4090, 0x43c0, 0x4458,
5581 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5582 0x4fc0, 0x5010, 0x53c0, 0x5444,
5583 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5584 0x5fc0, 0x6000, 0x6400, 0x6428,
5585 0x6800, 0x6848, 0x684c, 0x6860,
5586 0x6888, 0x6910, 0x8000 };
5587
5588 regs->version = 0;
5589
5590 memset(p, 0, BNX2_REGDUMP_LEN);
5591
5592 if (!netif_running(bp->dev))
5593 return;
5594
5595 i = 0;
5596 offset = reg_boundaries[0];
5597 p += offset;
5598 while (offset < BNX2_REGDUMP_LEN) {
5599 *p++ = REG_RD(bp, offset);
5600 offset += 4;
5601 if (offset == reg_boundaries[i + 1]) {
5602 offset = reg_boundaries[i + 2];
5603 p = (u32 *) (orig_p + offset);
5604 i += 2;
5605 }
5606 }
5607}
5608
b6016b76
MC
5609static void
5610bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5611{
972ec0d4 5612 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5613
5614 if (bp->flags & NO_WOL_FLAG) {
5615 wol->supported = 0;
5616 wol->wolopts = 0;
5617 }
5618 else {
5619 wol->supported = WAKE_MAGIC;
5620 if (bp->wol)
5621 wol->wolopts = WAKE_MAGIC;
5622 else
5623 wol->wolopts = 0;
5624 }
5625 memset(&wol->sopass, 0, sizeof(wol->sopass));
5626}
5627
5628static int
5629bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5630{
972ec0d4 5631 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5632
5633 if (wol->wolopts & ~WAKE_MAGIC)
5634 return -EINVAL;
5635
5636 if (wol->wolopts & WAKE_MAGIC) {
5637 if (bp->flags & NO_WOL_FLAG)
5638 return -EINVAL;
5639
5640 bp->wol = 1;
5641 }
5642 else {
5643 bp->wol = 0;
5644 }
5645 return 0;
5646}
5647
5648static int
5649bnx2_nway_reset(struct net_device *dev)
5650{
972ec0d4 5651 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5652 u32 bmcr;
5653
5654 if (!(bp->autoneg & AUTONEG_SPEED)) {
5655 return -EINVAL;
5656 }
5657
c770a65c 5658 spin_lock_bh(&bp->phy_lock);
b6016b76 5659
7b6b8347
MC
5660 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5661 int rc;
5662
5663 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5664 spin_unlock_bh(&bp->phy_lock);
5665 return rc;
5666 }
5667
b6016b76
MC
5668 /* Force a link down visible on the other side */
5669 if (bp->phy_flags & PHY_SERDES_FLAG) {
ca58c3af 5670 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 5671 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5672
5673 msleep(20);
5674
c770a65c 5675 spin_lock_bh(&bp->phy_lock);
f8dd064e
MC
5676
5677 bp->current_interval = SERDES_AN_TIMEOUT;
5678 bp->serdes_an_pending = 1;
5679 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5680 }
5681
ca58c3af 5682 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5683 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 5684 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 5685
c770a65c 5686 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5687
5688 return 0;
5689}
5690
5691static int
5692bnx2_get_eeprom_len(struct net_device *dev)
5693{
972ec0d4 5694 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5695
1122db71 5696 if (bp->flash_info == NULL)
b6016b76
MC
5697 return 0;
5698
1122db71 5699 return (int) bp->flash_size;
b6016b76
MC
5700}
5701
5702static int
5703bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5704 u8 *eebuf)
5705{
972ec0d4 5706 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5707 int rc;
5708
1064e944 5709 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5710
5711 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5712
5713 return rc;
5714}
5715
5716static int
5717bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5718 u8 *eebuf)
5719{
972ec0d4 5720 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5721 int rc;
5722
1064e944 5723 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5724
5725 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5726
5727 return rc;
5728}
5729
5730static int
5731bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5732{
972ec0d4 5733 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5734
5735 memset(coal, 0, sizeof(struct ethtool_coalesce));
5736
5737 coal->rx_coalesce_usecs = bp->rx_ticks;
5738 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5739 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5740 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5741
5742 coal->tx_coalesce_usecs = bp->tx_ticks;
5743 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5744 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5745 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5746
5747 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5748
5749 return 0;
5750}
5751
5752static int
5753bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5754{
972ec0d4 5755 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5756
5757 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5758 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5759
6aa20a22 5760 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
5761 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5762
5763 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5764 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5765
5766 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5767 if (bp->rx_quick_cons_trip_int > 0xff)
5768 bp->rx_quick_cons_trip_int = 0xff;
5769
5770 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5771 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5772
5773 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5774 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5775
5776 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5777 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5778
5779 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5780 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5781 0xff;
5782
5783 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
5784 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5785 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5786 bp->stats_ticks = USEC_PER_SEC;
5787 }
b6016b76
MC
5788 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5789 bp->stats_ticks &= 0xffff00;
5790
5791 if (netif_running(bp->dev)) {
5792 bnx2_netif_stop(bp);
5793 bnx2_init_nic(bp);
5794 bnx2_netif_start(bp);
5795 }
5796
5797 return 0;
5798}
5799
5800static void
5801bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5802{
972ec0d4 5803 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5804
13daffa2 5805 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5806 ering->rx_mini_max_pending = 0;
5807 ering->rx_jumbo_max_pending = 0;
5808
5809 ering->rx_pending = bp->rx_ring_size;
5810 ering->rx_mini_pending = 0;
5811 ering->rx_jumbo_pending = 0;
5812
5813 ering->tx_max_pending = MAX_TX_DESC_CNT;
5814 ering->tx_pending = bp->tx_ring_size;
5815}
5816
5817static int
5818bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5819{
972ec0d4 5820 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5821
13daffa2 5822 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
b6016b76
MC
5823 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5824 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5825
5826 return -EINVAL;
5827 }
13daffa2
MC
5828 if (netif_running(bp->dev)) {
5829 bnx2_netif_stop(bp);
5830 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5831 bnx2_free_skbs(bp);
5832 bnx2_free_mem(bp);
5833 }
5834
5835 bnx2_set_rx_ring_size(bp, ering->rx_pending);
b6016b76
MC
5836 bp->tx_ring_size = ering->tx_pending;
5837
5838 if (netif_running(bp->dev)) {
13daffa2
MC
5839 int rc;
5840
5841 rc = bnx2_alloc_mem(bp);
5842 if (rc)
5843 return rc;
b6016b76
MC
5844 bnx2_init_nic(bp);
5845 bnx2_netif_start(bp);
5846 }
5847
5848 return 0;
5849}
5850
5851static void
5852bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5853{
972ec0d4 5854 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5855
5856 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5857 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5858 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5859}
5860
5861static int
5862bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5863{
972ec0d4 5864 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5865
5866 bp->req_flow_ctrl = 0;
5867 if (epause->rx_pause)
5868 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5869 if (epause->tx_pause)
5870 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5871
5872 if (epause->autoneg) {
5873 bp->autoneg |= AUTONEG_FLOW_CTRL;
5874 }
5875 else {
5876 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5877 }
5878
c770a65c 5879 spin_lock_bh(&bp->phy_lock);
b6016b76 5880
0d8a6571 5881 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 5882
c770a65c 5883 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5884
5885 return 0;
5886}
5887
5888static u32
5889bnx2_get_rx_csum(struct net_device *dev)
5890{
972ec0d4 5891 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5892
5893 return bp->rx_csum;
5894}
5895
5896static int
5897bnx2_set_rx_csum(struct net_device *dev, u32 data)
5898{
972ec0d4 5899 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5900
5901 bp->rx_csum = data;
5902 return 0;
5903}
5904
b11d6213
MC
5905static int
5906bnx2_set_tso(struct net_device *dev, u32 data)
5907{
4666f87a
MC
5908 struct bnx2 *bp = netdev_priv(dev);
5909
5910 if (data) {
b11d6213 5911 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
5912 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5913 dev->features |= NETIF_F_TSO6;
5914 } else
5915 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5916 NETIF_F_TSO_ECN);
b11d6213
MC
5917 return 0;
5918}
5919
cea94db9 5920#define BNX2_NUM_STATS 46
b6016b76 5921
14ab9b86 5922static struct {
b6016b76
MC
5923 char string[ETH_GSTRING_LEN];
5924} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5925 { "rx_bytes" },
5926 { "rx_error_bytes" },
5927 { "tx_bytes" },
5928 { "tx_error_bytes" },
5929 { "rx_ucast_packets" },
5930 { "rx_mcast_packets" },
5931 { "rx_bcast_packets" },
5932 { "tx_ucast_packets" },
5933 { "tx_mcast_packets" },
5934 { "tx_bcast_packets" },
5935 { "tx_mac_errors" },
5936 { "tx_carrier_errors" },
5937 { "rx_crc_errors" },
5938 { "rx_align_errors" },
5939 { "tx_single_collisions" },
5940 { "tx_multi_collisions" },
5941 { "tx_deferred" },
5942 { "tx_excess_collisions" },
5943 { "tx_late_collisions" },
5944 { "tx_total_collisions" },
5945 { "rx_fragments" },
5946 { "rx_jabbers" },
5947 { "rx_undersize_packets" },
5948 { "rx_oversize_packets" },
5949 { "rx_64_byte_packets" },
5950 { "rx_65_to_127_byte_packets" },
5951 { "rx_128_to_255_byte_packets" },
5952 { "rx_256_to_511_byte_packets" },
5953 { "rx_512_to_1023_byte_packets" },
5954 { "rx_1024_to_1522_byte_packets" },
5955 { "rx_1523_to_9022_byte_packets" },
5956 { "tx_64_byte_packets" },
5957 { "tx_65_to_127_byte_packets" },
5958 { "tx_128_to_255_byte_packets" },
5959 { "tx_256_to_511_byte_packets" },
5960 { "tx_512_to_1023_byte_packets" },
5961 { "tx_1024_to_1522_byte_packets" },
5962 { "tx_1523_to_9022_byte_packets" },
5963 { "rx_xon_frames" },
5964 { "rx_xoff_frames" },
5965 { "tx_xon_frames" },
5966 { "tx_xoff_frames" },
5967 { "rx_mac_ctrl_frames" },
5968 { "rx_filtered_packets" },
5969 { "rx_discards" },
cea94db9 5970 { "rx_fw_discards" },
b6016b76
MC
5971};
5972
5973#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5974
f71e1309 5975static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5976 STATS_OFFSET32(stat_IfHCInOctets_hi),
5977 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5978 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5979 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5980 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5981 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5982 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5983 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5984 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5985 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5986 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
5987 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5988 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5989 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5990 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5991 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5992 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5993 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5994 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5995 STATS_OFFSET32(stat_EtherStatsCollisions),
5996 STATS_OFFSET32(stat_EtherStatsFragments),
5997 STATS_OFFSET32(stat_EtherStatsJabbers),
5998 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5999 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6000 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6001 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6002 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6003 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6004 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6005 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6006 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6007 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6008 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6009 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6010 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6011 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6012 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6013 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6014 STATS_OFFSET32(stat_XonPauseFramesReceived),
6015 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6016 STATS_OFFSET32(stat_OutXonSent),
6017 STATS_OFFSET32(stat_OutXoffSent),
6018 STATS_OFFSET32(stat_MacControlFramesReceived),
6019 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6020 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 6021 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
6022};
6023
6024/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6025 * skipped because of errata.
6aa20a22 6026 */
14ab9b86 6027static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6028 8,0,8,8,8,8,8,8,8,8,
6029 4,0,4,4,4,4,4,4,4,4,
6030 4,4,4,4,4,4,4,4,4,4,
6031 4,4,4,4,4,4,4,4,4,4,
cea94db9 6032 4,4,4,4,4,4,
b6016b76
MC
6033};
6034
5b0c76ad
MC
6035static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6036 8,0,8,8,8,8,8,8,8,8,
6037 4,4,4,4,4,4,4,4,4,4,
6038 4,4,4,4,4,4,4,4,4,4,
6039 4,4,4,4,4,4,4,4,4,4,
cea94db9 6040 4,4,4,4,4,4,
5b0c76ad
MC
6041};
6042
b6016b76
MC
6043#define BNX2_NUM_TESTS 6
6044
14ab9b86 6045static struct {
b6016b76
MC
6046 char string[ETH_GSTRING_LEN];
6047} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6048 { "register_test (offline)" },
6049 { "memory_test (offline)" },
6050 { "loopback_test (offline)" },
6051 { "nvram_test (online)" },
6052 { "interrupt_test (online)" },
6053 { "link_test (online)" },
6054};
6055
6056static int
6057bnx2_self_test_count(struct net_device *dev)
6058{
6059 return BNX2_NUM_TESTS;
6060}
6061
6062static void
6063bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6064{
972ec0d4 6065 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6066
6067 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6068 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6069 int i;
6070
b6016b76
MC
6071 bnx2_netif_stop(bp);
6072 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6073 bnx2_free_skbs(bp);
6074
6075 if (bnx2_test_registers(bp) != 0) {
6076 buf[0] = 1;
6077 etest->flags |= ETH_TEST_FL_FAILED;
6078 }
6079 if (bnx2_test_memory(bp) != 0) {
6080 buf[1] = 1;
6081 etest->flags |= ETH_TEST_FL_FAILED;
6082 }
bc5a0690 6083 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6084 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6085
6086 if (!netif_running(bp->dev)) {
6087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6088 }
6089 else {
6090 bnx2_init_nic(bp);
6091 bnx2_netif_start(bp);
6092 }
6093
6094 /* wait for link up */
80be4434
MC
6095 for (i = 0; i < 7; i++) {
6096 if (bp->link_up)
6097 break;
6098 msleep_interruptible(1000);
6099 }
b6016b76
MC
6100 }
6101
6102 if (bnx2_test_nvram(bp) != 0) {
6103 buf[3] = 1;
6104 etest->flags |= ETH_TEST_FL_FAILED;
6105 }
6106 if (bnx2_test_intr(bp) != 0) {
6107 buf[4] = 1;
6108 etest->flags |= ETH_TEST_FL_FAILED;
6109 }
6110
6111 if (bnx2_test_link(bp) != 0) {
6112 buf[5] = 1;
6113 etest->flags |= ETH_TEST_FL_FAILED;
6114
6115 }
6116}
6117
6118static void
6119bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6120{
6121 switch (stringset) {
6122 case ETH_SS_STATS:
6123 memcpy(buf, bnx2_stats_str_arr,
6124 sizeof(bnx2_stats_str_arr));
6125 break;
6126 case ETH_SS_TEST:
6127 memcpy(buf, bnx2_tests_str_arr,
6128 sizeof(bnx2_tests_str_arr));
6129 break;
6130 }
6131}
6132
6133static int
6134bnx2_get_stats_count(struct net_device *dev)
6135{
6136 return BNX2_NUM_STATS;
6137}
6138
6139static void
6140bnx2_get_ethtool_stats(struct net_device *dev,
6141 struct ethtool_stats *stats, u64 *buf)
6142{
972ec0d4 6143 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6144 int i;
6145 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6146 u8 *stats_len_arr = NULL;
b6016b76
MC
6147
6148 if (hw_stats == NULL) {
6149 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6150 return;
6151 }
6152
5b0c76ad
MC
6153 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6154 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6155 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6156 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6157 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6158 else
6159 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6160
6161 for (i = 0; i < BNX2_NUM_STATS; i++) {
6162 if (stats_len_arr[i] == 0) {
6163 /* skip this counter */
6164 buf[i] = 0;
6165 continue;
6166 }
6167 if (stats_len_arr[i] == 4) {
6168 /* 4-byte counter */
6169 buf[i] = (u64)
6170 *(hw_stats + bnx2_stats_offset_arr[i]);
6171 continue;
6172 }
6173 /* 8-byte counter */
6174 buf[i] = (((u64) *(hw_stats +
6175 bnx2_stats_offset_arr[i])) << 32) +
6176 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6177 }
6178}
6179
6180static int
6181bnx2_phys_id(struct net_device *dev, u32 data)
6182{
972ec0d4 6183 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6184 int i;
6185 u32 save;
6186
6187 if (data == 0)
6188 data = 2;
6189
6190 save = REG_RD(bp, BNX2_MISC_CFG);
6191 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6192
6193 for (i = 0; i < (data * 2); i++) {
6194 if ((i % 2) == 0) {
6195 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6196 }
6197 else {
6198 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6199 BNX2_EMAC_LED_1000MB_OVERRIDE |
6200 BNX2_EMAC_LED_100MB_OVERRIDE |
6201 BNX2_EMAC_LED_10MB_OVERRIDE |
6202 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6203 BNX2_EMAC_LED_TRAFFIC);
6204 }
6205 msleep_interruptible(500);
6206 if (signal_pending(current))
6207 break;
6208 }
6209 REG_WR(bp, BNX2_EMAC_LED, 0);
6210 REG_WR(bp, BNX2_MISC_CFG, save);
6211 return 0;
6212}
6213
4666f87a
MC
6214static int
6215bnx2_set_tx_csum(struct net_device *dev, u32 data)
6216{
6217 struct bnx2 *bp = netdev_priv(dev);
6218
6219 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6220 return (ethtool_op_set_tx_hw_csum(dev, data));
6221 else
6222 return (ethtool_op_set_tx_csum(dev, data));
6223}
6224
7282d491 6225static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6226 .get_settings = bnx2_get_settings,
6227 .set_settings = bnx2_set_settings,
6228 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6229 .get_regs_len = bnx2_get_regs_len,
6230 .get_regs = bnx2_get_regs,
b6016b76
MC
6231 .get_wol = bnx2_get_wol,
6232 .set_wol = bnx2_set_wol,
6233 .nway_reset = bnx2_nway_reset,
6234 .get_link = ethtool_op_get_link,
6235 .get_eeprom_len = bnx2_get_eeprom_len,
6236 .get_eeprom = bnx2_get_eeprom,
6237 .set_eeprom = bnx2_set_eeprom,
6238 .get_coalesce = bnx2_get_coalesce,
6239 .set_coalesce = bnx2_set_coalesce,
6240 .get_ringparam = bnx2_get_ringparam,
6241 .set_ringparam = bnx2_set_ringparam,
6242 .get_pauseparam = bnx2_get_pauseparam,
6243 .set_pauseparam = bnx2_set_pauseparam,
6244 .get_rx_csum = bnx2_get_rx_csum,
6245 .set_rx_csum = bnx2_set_rx_csum,
6246 .get_tx_csum = ethtool_op_get_tx_csum,
4666f87a 6247 .set_tx_csum = bnx2_set_tx_csum,
b6016b76
MC
6248 .get_sg = ethtool_op_get_sg,
6249 .set_sg = ethtool_op_set_sg,
b6016b76 6250 .get_tso = ethtool_op_get_tso,
b11d6213 6251 .set_tso = bnx2_set_tso,
b6016b76
MC
6252 .self_test_count = bnx2_self_test_count,
6253 .self_test = bnx2_self_test,
6254 .get_strings = bnx2_get_strings,
6255 .phys_id = bnx2_phys_id,
6256 .get_stats_count = bnx2_get_stats_count,
6257 .get_ethtool_stats = bnx2_get_ethtool_stats,
24b8e05d 6258 .get_perm_addr = ethtool_op_get_perm_addr,
b6016b76
MC
6259};
6260
6261/* Called with rtnl_lock */
6262static int
6263bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6264{
14ab9b86 6265 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6266 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6267 int err;
6268
6269 switch(cmd) {
6270 case SIOCGMIIPHY:
6271 data->phy_id = bp->phy_addr;
6272
6273 /* fallthru */
6274 case SIOCGMIIREG: {
6275 u32 mii_regval;
6276
7b6b8347
MC
6277 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6278 return -EOPNOTSUPP;
6279
dad3e452
MC
6280 if (!netif_running(dev))
6281 return -EAGAIN;
6282
c770a65c 6283 spin_lock_bh(&bp->phy_lock);
b6016b76 6284 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6285 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6286
6287 data->val_out = mii_regval;
6288
6289 return err;
6290 }
6291
6292 case SIOCSMIIREG:
6293 if (!capable(CAP_NET_ADMIN))
6294 return -EPERM;
6295
7b6b8347
MC
6296 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6297 return -EOPNOTSUPP;
6298
dad3e452
MC
6299 if (!netif_running(dev))
6300 return -EAGAIN;
6301
c770a65c 6302 spin_lock_bh(&bp->phy_lock);
b6016b76 6303 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6304 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6305
6306 return err;
6307
6308 default:
6309 /* do nothing */
6310 break;
6311 }
6312 return -EOPNOTSUPP;
6313}
6314
6315/* Called with rtnl_lock */
6316static int
6317bnx2_change_mac_addr(struct net_device *dev, void *p)
6318{
6319 struct sockaddr *addr = p;
972ec0d4 6320 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6321
73eef4cd
MC
6322 if (!is_valid_ether_addr(addr->sa_data))
6323 return -EINVAL;
6324
b6016b76
MC
6325 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6326 if (netif_running(dev))
6327 bnx2_set_mac_addr(bp);
6328
6329 return 0;
6330}
6331
6332/* Called with rtnl_lock */
6333static int
6334bnx2_change_mtu(struct net_device *dev, int new_mtu)
6335{
972ec0d4 6336 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6337
6338 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6339 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6340 return -EINVAL;
6341
6342 dev->mtu = new_mtu;
6343 if (netif_running(dev)) {
6344 bnx2_netif_stop(bp);
6345
6346 bnx2_init_nic(bp);
6347
6348 bnx2_netif_start(bp);
6349 }
6350 return 0;
6351}
6352
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler with the device IRQ
 * masked so netconsole/kgdboe can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6364
253c8b75
MC
6365static void __devinit
6366bnx2_get_5709_media(struct bnx2 *bp)
6367{
6368 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6369 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6370 u32 strap;
6371
6372 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6373 return;
6374 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6375 bp->phy_flags |= PHY_SERDES_FLAG;
6376 return;
6377 }
6378
6379 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6380 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6381 else
6382 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6383
6384 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6385 switch (strap) {
6386 case 0x4:
6387 case 0x5:
6388 case 0x6:
6389 bp->phy_flags |= PHY_SERDES_FLAG;
6390 return;
6391 }
6392 } else {
6393 switch (strap) {
6394 case 0x1:
6395 case 0x2:
6396 case 0x4:
6397 bp->phy_flags |= PHY_SERDES_FLAG;
6398 return;
6399 }
6400 }
6401}
6402
883e5151
MC
6403static void __devinit
6404bnx2_get_pci_speed(struct bnx2 *bp)
6405{
6406 u32 reg;
6407
6408 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6409 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6410 u32 clkreg;
6411
6412 bp->flags |= PCIX_FLAG;
6413
6414 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6415
6416 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6417 switch (clkreg) {
6418 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6419 bp->bus_speed_mhz = 133;
6420 break;
6421
6422 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6423 bp->bus_speed_mhz = 100;
6424 break;
6425
6426 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6427 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6428 bp->bus_speed_mhz = 66;
6429 break;
6430
6431 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6433 bp->bus_speed_mhz = 50;
6434 break;
6435
6436 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6437 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6438 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6439 bp->bus_speed_mhz = 33;
6440 break;
6441 }
6442 }
6443 else {
6444 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6445 bp->bus_speed_mhz = 66;
6446 else
6447 bp->bus_speed_mhz = 33;
6448 }
6449
6450 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6451 bp->flags |= PCI_32BIT_FLAG;
6452
6453}
6454
b6016b76
MC
6455static int __devinit
6456bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6457{
6458 struct bnx2 *bp;
6459 unsigned long mem_len;
58fc2ea4 6460 int rc, i, j;
b6016b76 6461 u32 reg;
40453c83 6462 u64 dma_mask, persist_dma_mask;
b6016b76
MC
6463
6464 SET_MODULE_OWNER(dev);
6465 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6466 bp = netdev_priv(dev);
b6016b76
MC
6467
6468 bp->flags = 0;
6469 bp->phy_flags = 0;
6470
6471 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6472 rc = pci_enable_device(pdev);
6473 if (rc) {
9b91cf9d 6474 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
6475 goto err_out;
6476 }
6477
6478 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6479 dev_err(&pdev->dev,
2e8a538d 6480 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6481 rc = -ENODEV;
6482 goto err_out_disable;
6483 }
6484
6485 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6486 if (rc) {
9b91cf9d 6487 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6488 goto err_out_disable;
6489 }
6490
6491 pci_set_master(pdev);
6492
6493 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6494 if (bp->pm_cap == 0) {
9b91cf9d 6495 dev_err(&pdev->dev,
2e8a538d 6496 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6497 rc = -EIO;
6498 goto err_out_release;
6499 }
6500
b6016b76
MC
6501 bp->dev = dev;
6502 bp->pdev = pdev;
6503
6504 spin_lock_init(&bp->phy_lock);
1b8227c4 6505 spin_lock_init(&bp->indirect_lock);
c4028958 6506 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6507
6508 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6509 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6510 dev->mem_end = dev->mem_start + mem_len;
6511 dev->irq = pdev->irq;
6512
6513 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6514
6515 if (!bp->regview) {
9b91cf9d 6516 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6517 rc = -ENOMEM;
6518 goto err_out_release;
6519 }
6520
6521 /* Configure byte swap and enable write to the reg_window registers.
6522 * Rely on CPU to do target byte swapping on big endian systems
6523 * The chip's target access swapping will not swap all accesses
6524 */
6525 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6526 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6527 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6528
829ca9a3 6529 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6530
6531 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6532
883e5151
MC
6533 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6534 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6535 dev_err(&pdev->dev,
6536 "Cannot find PCIE capability, aborting.\n");
6537 rc = -EIO;
6538 goto err_out_unmap;
6539 }
6540 bp->flags |= PCIE_FLAG;
6541 } else {
59b47d8a
MC
6542 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6543 if (bp->pcix_cap == 0) {
6544 dev_err(&pdev->dev,
6545 "Cannot find PCIX capability, aborting.\n");
6546 rc = -EIO;
6547 goto err_out_unmap;
6548 }
6549 }
6550
8e6a72c4
MC
6551 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6552 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6553 bp->flags |= MSI_CAP_FLAG;
6554 }
6555
40453c83
MC
6556 /* 5708 cannot support DMA addresses > 40-bit. */
6557 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6558 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6559 else
6560 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6561
6562 /* Configure DMA attributes. */
6563 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6564 dev->features |= NETIF_F_HIGHDMA;
6565 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6566 if (rc) {
6567 dev_err(&pdev->dev,
6568 "pci_set_consistent_dma_mask failed, aborting.\n");
6569 goto err_out_unmap;
6570 }
6571 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6572 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6573 goto err_out_unmap;
6574 }
6575
883e5151
MC
6576 if (!(bp->flags & PCIE_FLAG))
6577 bnx2_get_pci_speed(bp);
b6016b76
MC
6578
6579 /* 5706A0 may falsely detect SERR and PERR. */
6580 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6581 reg = REG_RD(bp, PCI_COMMAND);
6582 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6583 REG_WR(bp, PCI_COMMAND, reg);
6584 }
6585 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6586 !(bp->flags & PCIX_FLAG)) {
6587
9b91cf9d 6588 dev_err(&pdev->dev,
2e8a538d 6589 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6590 goto err_out_unmap;
6591 }
6592
6593 bnx2_init_nvram(bp);
6594
e3648b3d
MC
6595 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6596
6597 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6598 BNX2_SHM_HDR_SIGNATURE_SIG) {
6599 u32 off = PCI_FUNC(pdev->devfn) << 2;
6600
6601 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6602 } else
e3648b3d
MC
6603 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6604
b6016b76
MC
6605 /* Get the permanent MAC address. First we need to make sure the
6606 * firmware is actually running.
6607 */
e3648b3d 6608 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6609
6610 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6611 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6612 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6613 rc = -ENODEV;
6614 goto err_out_unmap;
6615 }
6616
58fc2ea4
MC
6617 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6618 for (i = 0, j = 0; i < 3; i++) {
6619 u8 num, k, skip0;
6620
6621 num = (u8) (reg >> (24 - (i * 8)));
6622 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6623 if (num >= k || !skip0 || k == 1) {
6624 bp->fw_version[j++] = (num / k) + '0';
6625 skip0 = 0;
6626 }
6627 }
6628 if (i != 2)
6629 bp->fw_version[j++] = '.';
6630 }
6631 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6632 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6633 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6634 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6635 int i;
6636 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6637
6638 bp->fw_version[j++] = ' ';
6639 for (i = 0; i < 3; i++) {
6640 reg = REG_RD_IND(bp, addr + i * 4);
6641 reg = swab32(reg);
6642 memcpy(&bp->fw_version[j], &reg, 4);
6643 j += 4;
6644 }
6645 }
b6016b76 6646
e3648b3d 6647 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6648 bp->mac_addr[0] = (u8) (reg >> 8);
6649 bp->mac_addr[1] = (u8) reg;
6650
e3648b3d 6651 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6652 bp->mac_addr[2] = (u8) (reg >> 24);
6653 bp->mac_addr[3] = (u8) (reg >> 16);
6654 bp->mac_addr[4] = (u8) (reg >> 8);
6655 bp->mac_addr[5] = (u8) reg;
6656
6657 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6658 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6659
6660 bp->rx_csum = 1;
6661
6662 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6663
6664 bp->tx_quick_cons_trip_int = 20;
6665 bp->tx_quick_cons_trip = 20;
6666 bp->tx_ticks_int = 80;
6667 bp->tx_ticks = 80;
6aa20a22 6668
b6016b76
MC
6669 bp->rx_quick_cons_trip_int = 6;
6670 bp->rx_quick_cons_trip = 6;
6671 bp->rx_ticks_int = 18;
6672 bp->rx_ticks = 18;
6673
6674 bp->stats_ticks = 1000000 & 0xffff00;
6675
6676 bp->timer_interval = HZ;
cd339a0e 6677 bp->current_interval = HZ;
b6016b76 6678
5b0c76ad
MC
6679 bp->phy_addr = 1;
6680
b6016b76 6681 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6682 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6683 bnx2_get_5709_media(bp);
6684 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6685 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6686
0d8a6571 6687 bp->phy_port = PORT_TP;
bac0dff6 6688 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6689 bp->phy_port = PORT_FIBRE;
b6016b76 6690 bp->flags |= NO_WOL_FLAG;
bac0dff6 6691 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6692 bp->phy_addr = 2;
e3648b3d 6693 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
6694 BNX2_SHARED_HW_CFG_CONFIG);
6695 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6696 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6697 }
0d8a6571
MC
6698 bnx2_init_remote_phy(bp);
6699
261dd5ca
MC
6700 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6701 CHIP_NUM(bp) == CHIP_NUM_5708)
6702 bp->phy_flags |= PHY_CRC_FIX_FLAG;
b659f44e
MC
6703 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6704 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6705
16088272
MC
6706 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6707 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6708 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
6709 bp->flags |= NO_WOL_FLAG;
6710
b6016b76
MC
6711 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6712 bp->tx_quick_cons_trip_int =
6713 bp->tx_quick_cons_trip;
6714 bp->tx_ticks_int = bp->tx_ticks;
6715 bp->rx_quick_cons_trip_int =
6716 bp->rx_quick_cons_trip;
6717 bp->rx_ticks_int = bp->rx_ticks;
6718 bp->comp_prod_trip_int = bp->comp_prod_trip;
6719 bp->com_ticks_int = bp->com_ticks;
6720 bp->cmd_ticks_int = bp->cmd_ticks;
6721 }
6722
f9317a40
MC
6723 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6724 *
6725 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6726 * with byte enables disabled on the unused 32-bit word. This is legal
6727 * but causes problems on the AMD 8132 which will eventually stop
6728 * responding after a while.
6729 *
6730 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6731 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6732 */
6733 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6734 struct pci_dev *amd_8132 = NULL;
6735
6736 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6737 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6738 amd_8132))) {
f9317a40 6739
44c10138
AK
6740 if (amd_8132->revision >= 0x10 &&
6741 amd_8132->revision <= 0x13) {
f9317a40
MC
6742 disable_msi = 1;
6743 pci_dev_put(amd_8132);
6744 break;
6745 }
6746 }
6747 }
6748
deaf391b 6749 bnx2_set_default_link(bp);
b6016b76
MC
6750 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6751
cd339a0e
MC
6752 init_timer(&bp->timer);
6753 bp->timer.expires = RUN_AT(bp->timer_interval);
6754 bp->timer.data = (unsigned long) bp;
6755 bp->timer.function = bnx2_timer;
6756
b6016b76
MC
6757 return 0;
6758
6759err_out_unmap:
6760 if (bp->regview) {
6761 iounmap(bp->regview);
73eef4cd 6762 bp->regview = NULL;
b6016b76
MC
6763 }
6764
6765err_out_release:
6766 pci_release_regions(pdev);
6767
6768err_out_disable:
6769 pci_disable_device(pdev);
6770 pci_set_drvdata(pdev, NULL);
6771
6772err_out:
6773 return rc;
6774}
6775
883e5151
MC
6776static char * __devinit
6777bnx2_bus_string(struct bnx2 *bp, char *str)
6778{
6779 char *s = str;
6780
6781 if (bp->flags & PCIE_FLAG) {
6782 s += sprintf(s, "PCI Express");
6783 } else {
6784 s += sprintf(s, "PCI");
6785 if (bp->flags & PCIX_FLAG)
6786 s += sprintf(s, "-X");
6787 if (bp->flags & PCI_32BIT_FLAG)
6788 s += sprintf(s, " 32-bit");
6789 else
6790 s += sprintf(s, " 64-bit");
6791 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6792 }
6793 return str;
6794}
6795
b6016b76
MC
/* PCI probe callback: allocate the net_device, initialize the board via
 * bnx2_init_board(), wire up the netdev operations and offload feature
 * flags, and register the interface with the networking core.
 * Returns 0 on success or a negative errno; on failure, everything
 * acquired here (and by bnx2_init_board()) is released before return.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];

	/* Print the driver banner only for the first device probed. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Maps registers, reads NVRAM/MAC address, sets up bp state.
	 * On failure it has already released its own resources. */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 supports IPv6 checksum offload (and TSO6 below). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Manually undo what bnx2_init_board() acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	/* Announce the device: chip revision letter/number are decoded
	 * from the chip id, bus description comes from bnx2_bus_string(). */
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6888
6889static void __devexit
6890bnx2_remove_one(struct pci_dev *pdev)
6891{
6892 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 6893 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6894
afdc08b9
MC
6895 flush_scheduled_work();
6896
b6016b76
MC
6897 unregister_netdev(dev);
6898
6899 if (bp->regview)
6900 iounmap(bp->regview);
6901
6902 free_netdev(dev);
6903 pci_release_regions(pdev);
6904 pci_disable_device(pdev);
6905 pci_set_drvdata(pdev, NULL);
6906}
6907
/* PCI suspend callback: quiesce the NIC and drop it into the requested
 * low-power state.  The firmware reset code is chosen from the device's
 * wake-on-LAN capability/configuration so the bootcode keeps (or drops)
 * the link across suspend.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	/* Let any pending reset task finish before stopping the device. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the unload code handed to the bootcode:
	 * no WOL support -> drop link; WOL armed -> keep link for wakeup;
	 * otherwise suspend without wake capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	/* Save config space before the device is powered down. */
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6934
/* PCI resume callback: restore config space, return the chip to full
 * power (D0), then re-initialize and restart the NIC if it was running
 * at suspend time.  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* The interface was down at suspend; nothing to restart. */
	if (!netif_running(dev))
		return 0;

	/* Order matters: config space and power first, then full re-init
	 * of the chip (state was lost across suspend), then restart. */
	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6951
/* PCI driver glue: ties the supported device table to the probe/remove
 * and power-management entry points defined above. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6960
6961static int __init bnx2_init(void)
6962{
29917620 6963 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
6964}
6965
/* Module exit: unregister the driver, which invokes the remove
 * callback for every bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6970
/* Register the module entry and exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6973
6974
6975