[BNX2]: Reduce spurious INTA interrupts.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052
MC
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76
MC
54
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
b91b9fd1
MC
57#define DRV_MODULE_VERSION "1.5.11"
58#define DRV_MODULE_RELDATE "June 4, 2007"
b6016b76
MC
59
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
e19360f2 65static const char version[] __devinitdata =
b6016b76
MC
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 69MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
70MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
5b0c76ad
MC
84 BCM5708,
85 BCM5708S,
bac0dff6 86 BCM5709,
27a005b8 87 BCM5709S,
b6016b76
MC
88} board_t;
89
90/* indexed by board_t, above */
f71e1309 91static const struct {
b6016b76
MC
92 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
37137709 130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
b6016b76
MC
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
37137709
MC
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76 136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
b6016b76
MC
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
37137709 141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
37137709 147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
37137709
MC
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
37137709
MC
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
b6016b76
MC
212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
e89bbf10
MC
216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
2f8af120 218 u32 diff;
e89bbf10 219
2f8af120 220 smp_mb();
faac9c4b
MC
221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
e89bbf10
MC
231 return (bp->tx_ring_size - diff);
232}
233
b6016b76
MC
234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
1b8227c4
MC
237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
b6016b76 240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
b6016b76
MC
244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
1b8227c4 249 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 252 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
253}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
1b8227c4 259 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
1b8227c4 277 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
278}
279
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 357
b6016b76
MC
358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
1269a8a6
MC
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
b6016b76
MC
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
bf5295bb 404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
13daffa2
MC
441 int i;
442
59b47d8a
MC
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
b6016b76 451 if (bp->status_blk) {
0f31f994 452 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
0f31f994 455 bp->stats_blk = NULL;
b6016b76
MC
456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
b4558ea9
JJ
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
13daffa2
MC
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
472 }
473 vfree(bp->rx_buf_ring);
b4558ea9 474 bp->rx_buf_ring = NULL;
b6016b76
MC
475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
0f31f994 480 int i, status_blk_size;
13daffa2 481
0f31f994
MC
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
b6016b76
MC
484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
b6016b76
MC
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
13daffa2
MC
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
b6016b76
MC
496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
13daffa2
MC
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
b6016b76 511
0f31f994
MC
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
0f31f994 522 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 523
0f31f994
MC
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
b6016b76 526
0f31f994 527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 528
59b47d8a
MC
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
b6016b76
MC
541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
547
e3648b3d
MC
548static void
549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
0d8a6571
MC
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
554 return;
555
e3648b3d
MC
556 if (bp->link_up) {
557 u32 bmsr;
558
559 switch (bp->line_speed) {
560 case SPEED_10:
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
563 else
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
565 break;
566 case SPEED_100:
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
569 else
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
571 break;
572 case SPEED_1000:
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
575 else
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
577 break;
578 case SPEED_2500:
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
581 else
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583 break;
584 }
585
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
587
588 if (bp->autoneg) {
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
590
ca58c3af
MC
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
593
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
597 else
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599 }
600 }
601 else
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
603
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605}
606
9b1084b8
MC
607static char *
608bnx2_xceiver_str(struct bnx2 *bp)
609{
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612 "Copper"));
613}
614
b6016b76
MC
615static void
616bnx2_report_link(struct bnx2 *bp)
617{
618 if (bp->link_up) {
619 netif_carrier_on(bp->dev);
9b1084b8
MC
620 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
621 bnx2_xceiver_str(bp));
b6016b76
MC
622
623 printk("%d Mbps ", bp->line_speed);
624
625 if (bp->duplex == DUPLEX_FULL)
626 printk("full duplex");
627 else
628 printk("half duplex");
629
630 if (bp->flow_ctrl) {
631 if (bp->flow_ctrl & FLOW_CTRL_RX) {
632 printk(", receive ");
633 if (bp->flow_ctrl & FLOW_CTRL_TX)
634 printk("& transmit ");
635 }
636 else {
637 printk(", transmit ");
638 }
639 printk("flow control ON");
640 }
641 printk("\n");
642 }
643 else {
644 netif_carrier_off(bp->dev);
9b1084b8
MC
645 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
646 bnx2_xceiver_str(bp));
b6016b76 647 }
e3648b3d
MC
648
649 bnx2_report_fw_link(bp);
b6016b76
MC
650}
651
652static void
653bnx2_resolve_flow_ctrl(struct bnx2 *bp)
654{
655 u32 local_adv, remote_adv;
656
657 bp->flow_ctrl = 0;
6aa20a22 658 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
659 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
660
661 if (bp->duplex == DUPLEX_FULL) {
662 bp->flow_ctrl = bp->req_flow_ctrl;
663 }
664 return;
665 }
666
667 if (bp->duplex != DUPLEX_FULL) {
668 return;
669 }
670
5b0c76ad
MC
671 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
672 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
673 u32 val;
674
675 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
676 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
677 bp->flow_ctrl |= FLOW_CTRL_TX;
678 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
679 bp->flow_ctrl |= FLOW_CTRL_RX;
680 return;
681 }
682
ca58c3af
MC
683 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
684 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
685
686 if (bp->phy_flags & PHY_SERDES_FLAG) {
687 u32 new_local_adv = 0;
688 u32 new_remote_adv = 0;
689
690 if (local_adv & ADVERTISE_1000XPAUSE)
691 new_local_adv |= ADVERTISE_PAUSE_CAP;
692 if (local_adv & ADVERTISE_1000XPSE_ASYM)
693 new_local_adv |= ADVERTISE_PAUSE_ASYM;
694 if (remote_adv & ADVERTISE_1000XPAUSE)
695 new_remote_adv |= ADVERTISE_PAUSE_CAP;
696 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
697 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
698
699 local_adv = new_local_adv;
700 remote_adv = new_remote_adv;
701 }
702
703 /* See Table 28B-3 of 802.3ab-1999 spec. */
704 if (local_adv & ADVERTISE_PAUSE_CAP) {
705 if(local_adv & ADVERTISE_PAUSE_ASYM) {
706 if (remote_adv & ADVERTISE_PAUSE_CAP) {
707 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
708 }
709 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
710 bp->flow_ctrl = FLOW_CTRL_RX;
711 }
712 }
713 else {
714 if (remote_adv & ADVERTISE_PAUSE_CAP) {
715 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
716 }
717 }
718 }
719 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
720 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
721 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
722
723 bp->flow_ctrl = FLOW_CTRL_TX;
724 }
725 }
726}
727
27a005b8
MC
728static int
729bnx2_5709s_linkup(struct bnx2 *bp)
730{
731 u32 val, speed;
732
733 bp->link_up = 1;
734
735 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
736 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
737 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
738
739 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
740 bp->line_speed = bp->req_line_speed;
741 bp->duplex = bp->req_duplex;
742 return 0;
743 }
744 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
745 switch (speed) {
746 case MII_BNX2_GP_TOP_AN_SPEED_10:
747 bp->line_speed = SPEED_10;
748 break;
749 case MII_BNX2_GP_TOP_AN_SPEED_100:
750 bp->line_speed = SPEED_100;
751 break;
752 case MII_BNX2_GP_TOP_AN_SPEED_1G:
753 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
754 bp->line_speed = SPEED_1000;
755 break;
756 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
757 bp->line_speed = SPEED_2500;
758 break;
759 }
760 if (val & MII_BNX2_GP_TOP_AN_FD)
761 bp->duplex = DUPLEX_FULL;
762 else
763 bp->duplex = DUPLEX_HALF;
764 return 0;
765}
766
b6016b76 767static int
5b0c76ad
MC
768bnx2_5708s_linkup(struct bnx2 *bp)
769{
770 u32 val;
771
772 bp->link_up = 1;
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
777 break;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
780 break;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
783 break;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
786 break;
787 }
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
790 else
791 bp->duplex = DUPLEX_HALF;
792
793 return 0;
794}
795
796static int
797bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
798{
799 u32 bmcr, local_adv, remote_adv, common;
800
801 bp->link_up = 1;
802 bp->line_speed = SPEED_1000;
803
ca58c3af 804 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
805 if (bmcr & BMCR_FULLDPLX) {
806 bp->duplex = DUPLEX_FULL;
807 }
808 else {
809 bp->duplex = DUPLEX_HALF;
810 }
811
812 if (!(bmcr & BMCR_ANENABLE)) {
813 return 0;
814 }
815
ca58c3af
MC
816 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
817 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
818
819 common = local_adv & remote_adv;
820 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
821
822 if (common & ADVERTISE_1000XFULL) {
823 bp->duplex = DUPLEX_FULL;
824 }
825 else {
826 bp->duplex = DUPLEX_HALF;
827 }
828 }
829
830 return 0;
831}
832
833static int
834bnx2_copper_linkup(struct bnx2 *bp)
835{
836 u32 bmcr;
837
ca58c3af 838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
839 if (bmcr & BMCR_ANENABLE) {
840 u32 local_adv, remote_adv, common;
841
842 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
843 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
844
845 common = local_adv & (remote_adv >> 2);
846 if (common & ADVERTISE_1000FULL) {
847 bp->line_speed = SPEED_1000;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_1000HALF) {
851 bp->line_speed = SPEED_1000;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else {
ca58c3af
MC
855 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
856 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
857
858 common = local_adv & remote_adv;
859 if (common & ADVERTISE_100FULL) {
860 bp->line_speed = SPEED_100;
861 bp->duplex = DUPLEX_FULL;
862 }
863 else if (common & ADVERTISE_100HALF) {
864 bp->line_speed = SPEED_100;
865 bp->duplex = DUPLEX_HALF;
866 }
867 else if (common & ADVERTISE_10FULL) {
868 bp->line_speed = SPEED_10;
869 bp->duplex = DUPLEX_FULL;
870 }
871 else if (common & ADVERTISE_10HALF) {
872 bp->line_speed = SPEED_10;
873 bp->duplex = DUPLEX_HALF;
874 }
875 else {
876 bp->line_speed = 0;
877 bp->link_up = 0;
878 }
879 }
880 }
881 else {
882 if (bmcr & BMCR_SPEED100) {
883 bp->line_speed = SPEED_100;
884 }
885 else {
886 bp->line_speed = SPEED_10;
887 }
888 if (bmcr & BMCR_FULLDPLX) {
889 bp->duplex = DUPLEX_FULL;
890 }
891 else {
892 bp->duplex = DUPLEX_HALF;
893 }
894 }
895
896 return 0;
897}
898
899static int
900bnx2_set_mac_link(struct bnx2 *bp)
901{
902 u32 val;
903
904 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
905 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
906 (bp->duplex == DUPLEX_HALF)) {
907 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
908 }
909
910 /* Configure the EMAC mode register. */
911 val = REG_RD(bp, BNX2_EMAC_MODE);
912
913 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 914 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 915 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
916
917 if (bp->link_up) {
5b0c76ad
MC
918 switch (bp->line_speed) {
919 case SPEED_10:
59b47d8a
MC
920 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
921 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
922 break;
923 }
924 /* fall through */
925 case SPEED_100:
926 val |= BNX2_EMAC_MODE_PORT_MII;
927 break;
928 case SPEED_2500:
59b47d8a 929 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
930 /* fall through */
931 case SPEED_1000:
932 val |= BNX2_EMAC_MODE_PORT_GMII;
933 break;
934 }
b6016b76
MC
935 }
936 else {
937 val |= BNX2_EMAC_MODE_PORT_GMII;
938 }
939
940 /* Set the MAC to operate in the appropriate duplex mode. */
941 if (bp->duplex == DUPLEX_HALF)
942 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
943 REG_WR(bp, BNX2_EMAC_MODE, val);
944
945 /* Enable/disable rx PAUSE. */
946 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
947
948 if (bp->flow_ctrl & FLOW_CTRL_RX)
949 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
950 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
951
952 /* Enable/disable tx PAUSE. */
953 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
954 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
955
956 if (bp->flow_ctrl & FLOW_CTRL_TX)
957 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
958 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
959
960 /* Acknowledge the interrupt. */
961 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
962
963 return 0;
964}
965
27a005b8
MC
966static void
967bnx2_enable_bmsr1(struct bnx2 *bp)
968{
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
973}
974
975static void
976bnx2_disable_bmsr1(struct bnx2 *bp)
977{
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
982}
983
605a9e20
MC
984static int
985bnx2_test_and_enable_2g5(struct bnx2 *bp)
986{
987 u32 up1;
988 int ret = 1;
989
990 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
991 return 0;
992
993 if (bp->autoneg & AUTONEG_SPEED)
994 bp->advertising |= ADVERTISED_2500baseX_Full;
995
27a005b8
MC
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
998
605a9e20
MC
999 bnx2_read_phy(bp, bp->mii_up1, &up1);
1000 if (!(up1 & BCM5708S_UP1_2G5)) {
1001 up1 |= BCM5708S_UP1_2G5;
1002 bnx2_write_phy(bp, bp->mii_up1, up1);
1003 ret = 0;
1004 }
1005
27a005b8
MC
1006 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1007 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1008 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1009
605a9e20
MC
1010 return ret;
1011}
1012
1013static int
1014bnx2_test_and_disable_2g5(struct bnx2 *bp)
1015{
1016 u32 up1;
1017 int ret = 0;
1018
1019 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1020 return 0;
1021
27a005b8
MC
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1024
605a9e20
MC
1025 bnx2_read_phy(bp, bp->mii_up1, &up1);
1026 if (up1 & BCM5708S_UP1_2G5) {
1027 up1 &= ~BCM5708S_UP1_2G5;
1028 bnx2_write_phy(bp, bp->mii_up1, up1);
1029 ret = 1;
1030 }
1031
27a005b8
MC
1032 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1033 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1034 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1035
605a9e20
MC
1036 return ret;
1037}
1038
1039static void
1040bnx2_enable_forced_2g5(struct bnx2 *bp)
1041{
1042 u32 bmcr;
1043
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1045 return;
1046
27a005b8
MC
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1048 u32 val;
1049
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1064 }
1065
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1070 }
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1072}
1073
1074static void
1075bnx2_disable_forced_2g5(struct bnx2 *bp)
1076{
1077 u32 bmcr;
1078
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1080 return;
1081
27a005b8
MC
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1083 u32 val;
1084
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1098 }
1099
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1103}
1104
b6016b76
MC
/* Re-evaluate link state from the PHY status registers and program the
 * MAC accordingly.  Called with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote (bootcode-managed) PHY: link events arrive via firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link status is latched-low, so the first read
	 * clears a stale latch and the second reflects current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: BMSR link bit is unreliable; trust the EMAC status. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve negotiated speed/duplex per PHY type. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can recover. */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only on transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1170
1171static int
1172bnx2_reset_phy(struct bnx2 *bp)
1173{
1174 int i;
1175 u32 reg;
1176
ca58c3af 1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1178
1179#define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1181 udelay(10);
1182
ca58c3af 1183 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1184 if (!(reg & BMCR_RESET)) {
1185 udelay(20);
1186 break;
1187 }
1188 }
1189 if (i == PHY_RESET_MAX_WAIT) {
1190 return -EBUSY;
1191 }
1192 return 0;
1193}
1194
1195static u32
1196bnx2_phy_get_pause_adv(struct bnx2 *bp)
1197{
1198 u32 adv = 0;
1199
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP;
1208 }
1209 }
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1213 }
1214 else {
1215 adv = ADVERTISE_PAUSE_ASYM;
1216 }
1217 }
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221 }
1222 else {
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1224 }
1225 }
1226 return adv;
1227}
1228
0d8a6571
MC
1229static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
b6016b76 1231static int
0d8a6571
MC
1232bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233{
1234 u32 speed_arg = 0, pause_adv;
1235
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252 } else {
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260 else
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267 }
1268 }
1269
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1284
1285 return 0;
1286}
1287
/* Configure a SerDes PHY according to bp->autoneg / bp->req_* settings.
 * Handles forced-speed (including 2.5G on 5708/5709) and autoneg modes.
 * Called with bp->phy_lock held; may drop it briefly to sleep.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Bootcode-managed PHY: hand the request to the firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggle 2.5G capability advertisement; if it changed we
		 * must bounce the link so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is presumably a
				 * chip-specific BMCR speed bit — confirm.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1402
/* All advertisable fibre speeds for this NIC (evaluates bp from the
 * enclosing scope).  The whole conditional expression is parenthesized:
 * without the outer parens, `ETHTOOL_ALL_FIBRE_SPEED | other_flags`
 * mis-grouped (`|` binds tighter than `?:`) and silently dropped the
 * extra flags from the 2.5G-capable branch.
 */
#define ETHTOOL_ALL_FIBRE_SPEED \
	((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All advertisable copper speeds. */
#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for 10/100 and 1000 speeds. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
0d8a6571
MC
/* Initialize autoneg/advertising/req_* defaults from the link settings
 * the bootcode stored in shared memory for the current PHY port.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	/* Defaults are kept per media type in shared memory. */
	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: map firmware speed bits to ethtool advertising. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced: later checks override earlier ones, so the
		 * highest advertised speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1464
deaf391b
MC
1465static void
1466bnx2_set_default_link(struct bnx2 *bp)
1467{
0d8a6571
MC
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1470
deaf391b
MC
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1474 u32 reg;
1475
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481 bp->autoneg = 0;
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1484 }
1485 } else
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1487}
1488
0d8a6571
MC
/* Handle a link-status event reported by the bootcode for a remote PHY:
 * decode speed/duplex/flow-control from the shared-memory status word,
 * update driver state, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets duplex then falls through to the
		 * matching FULL case to set the line speed.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Forced flow control applies only at full duplex;
		 * otherwise take the negotiated result from firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may have switched (SerDes <-> TP). */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1562
1563static int
1564bnx2_set_remote_link(struct bnx2 *bp)
1565{
1566 u32 evt_code;
1567
1568 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1569 switch (evt_code) {
1570 case BNX2_FW_EVT_CODE_LINK_EVENT:
1571 bnx2_remote_phy_event(bp);
1572 break;
1573 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1574 default:
1575 break;
1576 }
1577 return 0;
1578}
1579
b6016b76
MC
/* Configure a copper PHY according to bp->autoneg / bp->req_* settings.
 * Called with bp->phy_lock held; may drop it briefly to sleep.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: rebuild the advertisement registers and only
		 * restart autoneg if something actually changed.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read twice: BMSR link status is latched-low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1676
1677static int
0d8a6571 1678bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1679{
1680 if (bp->loopback == MAC_LOOPBACK)
1681 return 0;
1682
1683 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1684 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1685 }
1686 else {
1687 return (bnx2_setup_copper_phy(bp));
1688 }
1689}
1690
27a005b8
MC
/* One-time init of the 5709 SerDes PHY: the MII registers live at
 * offset +0x10 in a banked register map, so record the shifted
 * addresses first, then program fiber mode, 2.5G capability, and
 * next-page/CL73 autoneg controls.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* 5709S register addresses differ from standard MII. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route register accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Force fiber mode; disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1739
/* One-time init of the 5708 SerDes PHY: fiber mode, parallel detect,
 * optional 2.5G advertisement, plus board/revision-specific TX analog
 * tweaks from NVRAM.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared config,
	 * but only for backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1797
/* One-time init of the 5706 SerDes PHY, including the extended
 * packet-length setting for jumbo MTU.  Always returns 0.
 * Register 0x18/0x1c accesses use undocumented shadow-register
 * conventions from the vendor.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1834
/* One-time init of the copper PHY: optional CRC-fix and early-DAC
 * workarounds, extended packet length for jumbo MTU, and
 * ethernet@wirespeed (downshift) enable.  Always returns 0.
 * Registers 0x10/0x15/0x17/0x18/0x1c are vendor shadow registers.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Vendor-supplied DSP sequence working around a CRC
		 * erratum on affected PHY revisions.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		/* Disable early DAC via the DSP expansion register. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1885
1886
/* Full PHY bring-up: select link-ready interrupt mode, set default MII
 * register addresses, read the PHY ID, run the chip-specific init, and
 * finally apply the requested link settings.  Remote (bootcode-owned)
 * PHYs skip local init entirely.  Returns 0 or a negative errno from
 * the chip-specific init/setup.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Standard MII addresses; 5709S overrides these in its init. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Compose the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1930
1931static int
1932bnx2_set_mac_loopback(struct bnx2 *bp)
1933{
1934 u32 mac_mode;
1935
1936 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1937 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1938 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1939 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1940 bp->link_up = 1;
1941 return 0;
1942}
1943
bc5a0690
MC
1944static int bnx2_test_link(struct bnx2 *);
1945
/* Put the PHY into loopback at forced 1G full duplex, wait for link
 * (up to ~1 s), and switch the EMAC to GMII.  Returns 0 on success or
 * the PHY write error code.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link; proceed anyway if it never comes up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Clear all loop/duplex/speed overrides, then select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1975
/* Send a message to the bootcode through the driver mailbox and wait
 * for an acknowledgement (sleeps; do not call under a spinlock).
 * @msg_data: message code/data; a sequence number is OR'ed in here.
 * @silent:   suppress the timeout printk when non-zero.
 * Returns 0 on success/no-wait, -EBUSY on ack timeout, -EIO when the
 * firmware reports a failure status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require the firmware to finish. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2018
59b47d8a
MC
/* Initialize the 5709 context memory: trigger the hardware memory init
 * and program the host page table with the context block DMA addresses.
 * Returns 0 on success, -EBUSY if the hardware doesn't complete an
 * operation within the polling budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the self-clearing MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write each context page's 64-bit DMA address and wait for the
	 * write request to be consumed.
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2061
b6016b76
MC
/* Zero-initialize the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  5706 A0 remaps some CIDs to work around a silicon
 * erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* A0 erratum: remap CIDs with bit 3 set. */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each CID context spans several physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2107
/* Work around bad RX buffer memory blocks: drain the chip's mbuf
 * allocator, remember the good buffers (bit 9 clear), and return only
 * those to the pool — permanently retiring the bad ones.
 * Returns 0 on success, -ENOMEM if the scratch array can't be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer handle for the free command. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2158
2159static void
6aa20a22 2160bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2161{
2162 u32 val;
2163 u8 *mac_addr = bp->dev->dev_addr;
2164
2165 val = (mac_addr[0] << 8) | mac_addr[1];
2166
2167 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2168
6aa20a22 2169 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2170 (mac_addr[4] << 8) | mac_addr[5];
2171
2172 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2173}
2174
/* Allocate and DMA-map a fresh receive skb for ring slot @index, and
 * publish its bus address in the corresponding rx_bd.  Also advances
 * bp->rx_prod_bseq by the buffer size (hardware byte-sequence counter).
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit bus address across the two 32-bit BD fields. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2205
da3e4fbe
MC
2206static int
2207bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2208{
da3e4fbe 2209 struct status_block *sblk = bp->status_blk;
b6016b76 2210 u32 new_link_state, old_link_state;
da3e4fbe 2211 int is_set = 1;
b6016b76 2212
da3e4fbe
MC
2213 new_link_state = sblk->status_attn_bits & event;
2214 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2215 if (new_link_state != old_link_state) {
da3e4fbe
MC
2216 if (new_link_state)
2217 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2218 else
2219 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2220 } else
2221 is_set = 0;
2222
2223 return is_set;
2224}
2225
/* Handle PHY-related attention events signalled in the status block:
 * link-state changes (under phy_lock) and remote-link timer aborts.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2238
/* Reclaim completed TX descriptors: walk the ring from the software
 * consumer index up to the hardware consumer index from the status
 * block, unmapping DMA and freeing each completed skb, then wake the
 * TX queue if it was stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Index skips the last slot of each ring page — presumably the
	 * next-page chain BD; TODO confirm against ring layout.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the packet's last BD is not yet consumed
			 * (signed 16-bit distance handles wrap-around).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each page fragment. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more packets may have
		 * completed while we were freeing.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under netif_tx_lock to close the race with the
		 * transmit path stopping the queue concurrently.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2326
/* Recycle an RX buffer in place: re-post the skb that was at ring slot
 * @cons into slot @prod without reallocating or remapping.  Used when a
 * packet is dropped or copied out, so the original buffer can be handed
 * back to the hardware.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the (partially CPU-synced) header region back to the
	 * device before re-posting the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	/* Different slots: carry the DMA mapping and BD address over to
	 * the producer slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2356
/* NAPI receive processing: consume up to @budget completed RX
 * descriptors, validating each frame header, copying small packets
 * (jumbo-MTU case) or replenishing the ring, applying checksum/VLAN
 * offload results, and passing packets up the stack.  Returns the
 * number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Index skips the last slot of each ring page — presumably the
	 * next-page chain BD; TODO confirm against ring layout.
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header region needs to be synced to
		 * inspect the l2_fhdr and (for small packets) copy out.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the 4-byte FCS from the reported length. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement posted: unmap and hand this skb up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100 =
		 * ETH_P_8021Q), whose tag accounts for the extra bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
				   L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the hardware about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2504
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts; NAPI poll will re-enable them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2527
8e6a72c4
MC
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so no
 * explicit INT_ACK_CMD masking write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2544
/* INTx (legacy, possibly shared) ISR: verify the interrupt is really
 * ours, mask and deassert the line, then schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if NAPI actually gets scheduled,
	 * so an already-running poll sees fresh work.
	 */
	if (netif_rx_schedule_prep(dev)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev);
	}

	return IRQ_HANDLED;
}
2583
0d8a6571
MC
2584#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2585 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2586
f4e418f7
MC
2587static inline int
2588bnx2_has_work(struct bnx2 *bp)
2589{
2590 struct status_block *sblk = bp->status_blk;
2591
2592 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2593 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2594 return 1;
2595
da3e4fbe
MC
2596 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2597 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2598 return 1;
2599
2600 return 0;
2601}
2602
b6016b76
MC
/* NAPI poll handler: service PHY attention events, TX completions and
 * RX packets (bounded by *budget), then either re-enable interrupts
 * (return 0) when no work remains, or ask to be polled again (return 1).
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Honor both the global budget and this device's quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two writes — first update the index with the
		 * interrupt still masked, then unmask.  Presumably this
		 * avoids spurious INTA assertions; confirm against the
		 * chip's interrupt-ack semantics.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2663
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the receive filters (promiscuous / all-multicast / multicast
 * hash, VLAN tag stripping) to match the net_device flags and
 * multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no VLAN group is
	 * registered and ASF does not need them stripped.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast frame: all hash bits set. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low byte of the CRC selects one of 256 hash bits,
			 * spread across 8 32-bit registers.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Apply the sort configuration: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2738
fba9fe91
MC
2739#define FW_BUF_SIZE 0x8000
2740
2741static int
2742bnx2_gunzip_init(struct bnx2 *bp)
2743{
2744 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2745 goto gunzip_nomem1;
2746
2747 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2748 goto gunzip_nomem2;
2749
2750 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2751 if (bp->strm->workspace == NULL)
2752 goto gunzip_nomem3;
2753
2754 return 0;
2755
2756gunzip_nomem3:
2757 kfree(bp->strm);
2758 bp->strm = NULL;
2759
2760gunzip_nomem2:
2761 vfree(bp->gunzip_buf);
2762 bp->gunzip_buf = NULL;
2763
2764gunzip_nomem1:
2765 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2766 "uncompression.\n", bp->dev->name);
2767 return -ENOMEM;
2768}
2769
2770static void
2771bnx2_gunzip_end(struct bnx2 *bp)
2772{
2773 kfree(bp->strm->workspace);
2774
2775 kfree(bp->strm);
2776 bp->strm = NULL;
2777
2778 if (bp->gunzip_buf) {
2779 vfree(bp->gunzip_buf);
2780 bp->gunzip_buf = NULL;
2781 }
2782}
2783
/* Decompress a gzip-wrapped firmware image @zbuf of @len bytes into
 * bp->gunzip_buf (capacity FW_BUF_SIZE).  On success *outbuf points at
 * the buffer and *outlen holds the decompressed size; returns 0, or a
 * negative/zlib error code on failure.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME 0x8
	/* Skip the NUL-terminated original-file-name field, if present. */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative window bits: raw deflate, no zlib header/trailer. */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2824
b6016b76
MC
/* Load an RV2P processor firmware image: write each 64-bit instruction
 * through the INSTR_HIGH/INSTR_LOW registers and commit it at the
 * instruction index via the per-processor ADDR_CMD register, then
 * reset the target processor (it is un-stalled later).
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			/* i / 8 is the instruction index. */
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2857
af3ee519 2858static int
b6016b76
MC
2859load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2860{
2861 u32 offset;
2862 u32 val;
af3ee519 2863 int rc;
b6016b76
MC
2864
2865 /* Halt the CPU. */
2866 val = REG_RD_IND(bp, cpu_reg->mode);
2867 val |= cpu_reg->mode_value_halt;
2868 REG_WR_IND(bp, cpu_reg->mode, val);
2869 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2870
2871 /* Load the Text area. */
2872 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519
MC
2873 if (fw->gz_text) {
2874 u32 text_len;
2875 void *text;
2876
2877 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2878 &text_len);
2879 if (rc)
2880 return rc;
2881
2882 fw->text = text;
2883 }
2884 if (fw->gz_text) {
b6016b76
MC
2885 int j;
2886
2887 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
fba9fe91 2888 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2889 }
2890 }
2891
2892 /* Load the Data area. */
2893 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2894 if (fw->data) {
2895 int j;
2896
2897 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2898 REG_WR_IND(bp, offset, fw->data[j]);
2899 }
2900 }
2901
2902 /* Load the SBSS area. */
2903 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2904 if (fw->sbss) {
2905 int j;
2906
2907 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2908 REG_WR_IND(bp, offset, fw->sbss[j]);
2909 }
2910 }
2911
2912 /* Load the BSS area. */
2913 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2914 if (fw->bss) {
2915 int j;
2916
2917 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->bss[j]);
2919 }
2920 }
2921
2922 /* Load the Read-Only area. */
2923 offset = cpu_reg->spad_base +
2924 (fw->rodata_addr - cpu_reg->mips_view_base);
2925 if (fw->rodata) {
2926 int j;
2927
2928 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2929 REG_WR_IND(bp, offset, fw->rodata[j]);
2930 }
2931 }
2932
2933 /* Clear the pre-fetch instruction. */
2934 REG_WR_IND(bp, cpu_reg->inst, 0);
2935 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2936
2937 /* Start the CPU. */
2938 val = REG_RD_IND(bp, cpu_reg->mode);
2939 val &= ~cpu_reg->mode_value_halt;
2940 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2941 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2942
2943 return 0;
b6016b76
MC
2944}
2945
/* Initialize all on-chip processors: decompress and load both RV2P
 * images, then load firmware into the RX, TX, TX patch-up, Completion
 * and (5709 only) Command processors, choosing the 06 or 09 firmware
 * set by chip generation.  Returns 0 on success or the first error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3090
/* Transition the device between PCI power states.  D0: clear the power
 * state and PME status, restore EMAC/RPM modes.  D3hot: optionally
 * configure Wake-on-LAN (force 10/100 copper, magic/ACPI packet modes,
 * all-multicast receive), notify firmware, then write the D3hot state
 * into PMCSR.  Only PCI_D0 and PCI_D3hot are supported; anything else
 * returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and the PME status bit
		 * (write-1-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the
			 * low-power WOL link, then restore settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1, only enter D3hot (state value 3) when
		 * WOL is enabled; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3217
3218static int
3219bnx2_acquire_nvram_lock(struct bnx2 *bp)
3220{
3221 u32 val;
3222 int j;
3223
3224 /* Request access to the flash interface. */
3225 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3226 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3227 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3228 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3229 break;
3230
3231 udelay(5);
3232 }
3233
3234 if (j >= NVRAM_TIMEOUT_COUNT)
3235 return -EBUSY;
3236
3237 return 0;
3238}
3239
3240static int
3241bnx2_release_nvram_lock(struct bnx2 *bp)
3242{
3243 int j;
3244 u32 val;
3245
3246 /* Relinquish nvram interface. */
3247 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3248
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3251 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3252 break;
3253
3254 udelay(5);
3255 }
3256
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3258 return -EBUSY;
3259
3260 return 0;
3261}
3262
3263
/* Lift the NVRAM write protection.  Sets the PCI write-enable bit in
 * MISC_CFG and, for non-buffered (raw SPI flash) parts, additionally
 * issues a WREN command to the device and waits for it to complete.
 * Returns 0 on success, -EBUSY if the WREN command never finishes.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		/* DONE is cleared by an explicit write before issuing
		 * a new command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		/* Poll for command completion, 5 us per attempt. */
		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
3292
3293static void
3294bnx2_disable_nvram_write(struct bnx2 *bp)
3295{
3296 u32 val;
3297
3298 val = REG_RD(bp, BNX2_MISC_CFG);
3299 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3300}
3301
3302
3303static void
3304bnx2_enable_nvram_access(struct bnx2 *bp)
3305{
3306 u32 val;
3307
3308 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3309 /* Enable both bits, even on read. */
6aa20a22 3310 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3311 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3312}
3313
3314static void
3315bnx2_disable_nvram_access(struct bnx2 *bp)
3316{
3317 u32 val;
3318
3319 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3320 /* Disable both bits, even after read. */
6aa20a22 3321 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3322 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3323 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3324}
3325
/* Erase one flash page at @offset.  Buffered (EEPROM-style) parts
 * handle erasure internally on write, so this is a no-op for them.
 * Caller must already hold the NVRAM lock with access and write
 * enabled.  Returns 0 on success, -EBUSY if the erase never completes.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion (5 us per poll). */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3365
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  Caller must hold the NVRAM lock with
 * access enabled.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash: buffered parts address
	 * by (page index << page_bits) + byte offset within the page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* NVRAM data is big-endian on the wire. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3411
3412
/* Write one 32-bit word from @val (4 bytes) to NVRAM at @offset.
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits.  Caller
 * must hold the NVRAM lock with access and write enabled.  Returns 0
 * on success, -EBUSY if the write command never completes.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash: buffered parts address
	 * by (page index << page_bits) + byte offset within the page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* NVRAM data is big-endian on the wire. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3456
3457static int
3458bnx2_init_nvram(struct bnx2 *bp)
3459{
3460 u32 val;
3461 int j, entry_count, rc;
3462 struct flash_spec *flash;
3463
3464 /* Determine the selected interface. */
3465 val = REG_RD(bp, BNX2_NVM_CFG1);
3466
3467 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3468
3469 rc = 0;
3470 if (val & 0x40000000) {
3471
3472 /* Flash interface has been reconfigured */
3473 for (j = 0, flash = &flash_table[0]; j < entry_count;
37137709
MC
3474 j++, flash++) {
3475 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3476 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
b6016b76
MC
3477 bp->flash_info = flash;
3478 break;
3479 }
3480 }
3481 }
3482 else {
37137709 3483 u32 mask;
b6016b76
MC
3484 /* Not yet been reconfigured */
3485
37137709
MC
3486 if (val & (1 << 23))
3487 mask = FLASH_BACKUP_STRAP_MASK;
3488 else
3489 mask = FLASH_STRAP_MASK;
3490
b6016b76
MC
3491 for (j = 0, flash = &flash_table[0]; j < entry_count;
3492 j++, flash++) {
3493
37137709 3494 if ((val & mask) == (flash->strapping & mask)) {
b6016b76
MC
3495 bp->flash_info = flash;
3496
3497 /* Request access to the flash interface. */
3498 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3499 return rc;
3500
3501 /* Enable access to flash interface */
3502 bnx2_enable_nvram_access(bp);
3503
3504 /* Reconfigure the flash interface */
3505 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3506 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3507 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3508 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3509
3510 /* Disable access to flash interface */
3511 bnx2_disable_nvram_access(bp);
3512 bnx2_release_nvram_lock(bp);
3513
3514 break;
3515 }
3516 }
3517 } /* if (val & 0x40000000) */
3518
3519 if (j == entry_count) {
3520 bp->flash_info = NULL;
2f23c523 3521 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
1122db71 3522 return -ENODEV;
b6016b76
MC
3523 }
3524
1122db71
MC
3525 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3526 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3527 if (val)
3528 bp->flash_size = val;
3529 else
3530 bp->flash_size = bp->flash_info->total_size;
3531
b6016b76
MC
3532 return rc;
3533}
3534
3535static int
3536bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3537 int buf_size)
3538{
3539 int rc = 0;
3540 u32 cmd_flags, offset32, len32, extra;
3541
3542 if (buf_size == 0)
3543 return 0;
3544
3545 /* Request access to the flash interface. */
3546 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3547 return rc;
3548
3549 /* Enable access to flash interface */
3550 bnx2_enable_nvram_access(bp);
3551
3552 len32 = buf_size;
3553 offset32 = offset;
3554 extra = 0;
3555
3556 cmd_flags = 0;
3557
3558 if (offset32 & 3) {
3559 u8 buf[4];
3560 u32 pre_len;
3561
3562 offset32 &= ~3;
3563 pre_len = 4 - (offset & 3);
3564
3565 if (pre_len >= len32) {
3566 pre_len = len32;
3567 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3568 BNX2_NVM_COMMAND_LAST;
3569 }
3570 else {
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3572 }
3573
3574 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576 if (rc)
3577 return rc;
3578
3579 memcpy(ret_buf, buf + (offset & 3), pre_len);
3580
3581 offset32 += 4;
3582 ret_buf += pre_len;
3583 len32 -= pre_len;
3584 }
3585 if (len32 & 3) {
3586 extra = 4 - (len32 & 3);
3587 len32 = (len32 + 4) & ~3;
3588 }
3589
3590 if (len32 == 4) {
3591 u8 buf[4];
3592
3593 if (cmd_flags)
3594 cmd_flags = BNX2_NVM_COMMAND_LAST;
3595 else
3596 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3597 BNX2_NVM_COMMAND_LAST;
3598
3599 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3600
3601 memcpy(ret_buf, buf, 4 - extra);
3602 }
3603 else if (len32 > 0) {
3604 u8 buf[4];
3605
3606 /* Read the first word. */
3607 if (cmd_flags)
3608 cmd_flags = 0;
3609 else
3610 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3611
3612 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3613
3614 /* Advance to the next dword. */
3615 offset32 += 4;
3616 ret_buf += 4;
3617 len32 -= 4;
3618
3619 while (len32 > 4 && rc == 0) {
3620 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3621
3622 /* Advance to the next dword. */
3623 offset32 += 4;
3624 ret_buf += 4;
3625 len32 -= 4;
3626 }
3627
3628 if (rc)
3629 return rc;
3630
3631 cmd_flags = BNX2_NVM_COMMAND_LAST;
3632 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3633
3634 memcpy(ret_buf, buf, 4 - extra);
3635 }
3636
3637 /* Disable access to flash interface */
3638 bnx2_disable_nvram_access(bp);
3639
3640 bnx2_release_nvram_lock(bp);
3641
3642 return rc;
3643}
3644
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging into a kmalloc'd aligned shadow copy.  For
 * non-buffered (raw SPI) flash, each affected page is read into a
 * scratch buffer, erased, and rewritten in full; buffered parts are
 * written in place.  The NVRAM lock is acquired and released once per
 * page so other users are not starved during long writes.
 *
 * NOTE(review): the goto nvram_write_end error paths taken after
 * bnx2_acquire_nvram_lock() succeeds exit with the lock still held and
 * access enabled — verify whether callers recover, or whether this
 * needs the same cleanup treatment as bnx2_nvram_read().
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the span down to a dword boundary and
	 * fetch the existing leading bytes. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen up to a dword boundary and fetch the
	 * existing trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge old edge bytes and new payload into an aligned copy. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer (264 is
	 * large enough for the supported page sizes). */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so these are safe on every path. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3824
0d8a6571
MC
/* Detect whether the bootcode offers remote-PHY management for a
 * SerDes device.  Checks the firmware capability mailbox signature;
 * if the remote-PHY capability is advertised, acknowledges it (only
 * while the netdev is running), sets REMOTE_PHY_CAP_FLAG, and records
 * the current PHY port type from the firmware link status.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY only applies to SerDes devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Validate the firmware capability mailbox signature. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		/* Acknowledge the capability to the firmware, but only
		 * when the interface is up. */
		if (netif_running(bp->dev)) {
			val = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}
3854
b6016b76
MC
3855static int
3856bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3857{
3858 u32 val;
3859 int i, rc = 0;
3860
3861 /* Wait for the current PCI transaction to complete before
3862 * issuing a reset. */
3863 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3864 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3865 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3866 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3867 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3868 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3869 udelay(5);
3870
b090ae2b
MC
3871 /* Wait for the firmware to tell us it is ok to issue a reset. */
3872 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3873
b6016b76
MC
3874 /* Deposit a driver reset signature so the firmware knows that
3875 * this is a soft reset. */
e3648b3d 3876 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
b6016b76
MC
3877 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3878
b6016b76
MC
3879 /* Do a dummy read to force the chip to complete all current transaction
3880 * before we issue a reset. */
3881 val = REG_RD(bp, BNX2_MISC_ID);
3882
234754d5
MC
3883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3884 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3885 REG_RD(bp, BNX2_MISC_COMMAND);
3886 udelay(5);
b6016b76 3887
234754d5
MC
3888 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3889 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 3890
234754d5 3891 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 3892
234754d5
MC
3893 } else {
3894 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3895 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3896 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3897
3898 /* Chip reset. */
3899 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3900
3901 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3902 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3903 current->state = TASK_UNINTERRUPTIBLE;
3904 schedule_timeout(HZ / 50);
b6016b76 3905 }
b6016b76 3906
234754d5
MC
3907 /* Reset takes approximate 30 usec */
3908 for (i = 0; i < 10; i++) {
3909 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3910 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3911 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3912 break;
3913 udelay(10);
3914 }
3915
3916 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3917 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3918 printk(KERN_ERR PFX "Chip reset did not complete\n");
3919 return -EBUSY;
3920 }
b6016b76
MC
3921 }
3922
3923 /* Make sure byte swapping is properly configured. */
3924 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3925 if (val != 0x01020304) {
3926 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3927 return -ENODEV;
3928 }
3929
b6016b76 3930 /* Wait for the firmware to finish its initialization. */
b090ae2b
MC
3931 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3932 if (rc)
3933 return rc;
b6016b76 3934
0d8a6571
MC
3935 spin_lock_bh(&bp->phy_lock);
3936 bnx2_init_remote_phy(bp);
3937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3938 bnx2_set_default_remote_link(bp);
3939 spin_unlock_bh(&bp->phy_lock);
3940
b6016b76
MC
3941 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3942 /* Adjust the voltage regular to two steps lower. The default
3943 * of this register is 0x0000000e. */
3944 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3945
3946 /* Remove bad rbuf memory from the free pool. */
3947 rc = bnx2_alloc_bad_rbuf(bp);
3948 }
3949
3950 return rc;
3951}
3952
/* Bring the chip from post-reset state to operational: program DMA
 * byte/word swapping, context memory, on-chip CPUs, MAC address, MTU,
 * and all host-coalescing parameters, then hand off to the firmware
 * and enable the remaining blocks.  The write ordering below follows
 * the hardware bring-up sequence and must not be rearranged.
 *
 * Returns 0 on success or an error from context/CPU init or the
 * firmware handshake.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: force single-DMA mode in the TX DMA block. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* Disable PCI-X relaxed ordering. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters (int-mode value in the high
	 * halfword, normal value in the low halfword). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Record whether the firmware has ASF management enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to post the write. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4134
59b47d8a
MC
4135static void
4136bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4137{
4138 u32 val, offset0, offset1, offset2, offset3;
4139
4140 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4141 offset0 = BNX2_L2CTX_TYPE_XI;
4142 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4143 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4144 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4145 } else {
4146 offset0 = BNX2_L2CTX_TYPE;
4147 offset1 = BNX2_L2CTX_CMD_TYPE;
4148 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4149 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4150 }
4151 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4152 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4153
4154 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4155 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4156
4157 val = (u64) bp->tx_desc_mapping >> 32;
4158 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4159
4160 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4161 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4162}
b6016b76
MC
4163
/* Set up the TX ring: chain the last BD back to the ring start, reset
 * all producer/consumer indices, compute the mailbox addresses for the
 * TX index/sequence doorbells, and program the TX context.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the queue once it has drained to half capacity. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The final slot is a chain BD pointing back to the ring base. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Doorbell mailbox addresses for the TX host index and byte
	 * sequence, derived from the connection ID. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
4188
/* Set up the RX rings: size the receive buffers for the current MTU,
 * initialize every BD in every ring page, chain each page's last BD to
 * the next page (wrapping at the end), program the RX context with the
 * first page's DMA address, and pre-post rx_ring_size receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page chains to the next page; the final
		 * page wraps back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
			0xffffffff;
	}

	/* Program the RX context type and the ring's base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-post receive buffers; stop early on allocation failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip how many buffers are available. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4248
13daffa2
MC
4249static void
4250bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4251{
4252 u32 num_rings, max;
4253
4254 bp->rx_ring_size = size;
4255 num_rings = 1;
4256 while (size > MAX_RX_DESC_CNT) {
4257 size -= MAX_RX_DESC_CNT;
4258 num_rings++;
4259 }
4260 /* round to next power of 2 */
4261 max = MAX_RX_RINGS;
4262 while ((max & num_rings) == 0)
4263 max >>= 1;
4264
4265 if (num_rings != max)
4266 max <<= 1;
4267
4268 bp->rx_max_ring = max;
4269 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4270}
4271
b6016b76
MC
/* Free every sk_buff still queued on the TX ring, unmapping the head
 * buffer and every fragment's DMA mapping.  Called while the hardware
 * is quiesced (during reset), so there is no concurrent completion
 * processing.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Fragments occupy the slots immediately following the
		 * head slot. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Advance past the head slot and all fragment slots. */
		i += j + 1;
	}

}
4308
4309static void
4310bnx2_free_rx_skbs(struct bnx2 *bp)
4311{
4312 int i;
4313
4314 if (bp->rx_buf_ring == NULL)
4315 return;
4316
13daffa2 4317 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4318 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4319 struct sk_buff *skb = rx_buf->skb;
4320
05d0f1cf 4321 if (skb == NULL)
b6016b76
MC
4322 continue;
4323
4324 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4325 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4326
4327 rx_buf->skb = NULL;
4328
745720e5 4329 dev_kfree_skb(skb);
b6016b76
MC
4330 }
4331}
4332
/* Drop every queued TX packet, then every posted RX buffer. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4339
4340static int
4341bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4342{
4343 int rc;
4344
4345 rc = bnx2_reset_chip(bp, reset_code);
4346 bnx2_free_skbs(bp);
4347 if (rc)
4348 return rc;
4349
fba9fe91
MC
4350 if ((rc = bnx2_init_chip(bp)) != 0)
4351 return rc;
4352
b6016b76
MC
4353 bnx2_init_tx_ring(bp);
4354 bnx2_init_rx_ring(bp);
4355 return 0;
4356}
4357
4358static int
4359bnx2_init_nic(struct bnx2 *bp)
4360{
4361 int rc;
4362
4363 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4364 return rc;
4365
80be4434 4366 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4367 bnx2_init_phy(bp);
4368 bnx2_set_link(bp);
0d8a6571 4369 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4370 return 0;
4371}
4372
/* Ethtool self-test: verify a table of chip registers.  For each entry,
 * writing 0 must clear all read/write bits (rw_mask) and writing
 * all-ones must set them, while the read-only bits (ro_mask) must keep
 * their original value throughout.  The saved register value is
 * restored afterwards, including on the failure path.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.
 *
 * Returns 0 if every register passes, -ENODEV on the first failure.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;	/* bits that must be writable */
		u32 ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must read back as 0 after writing 0. */
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unaffected by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* Writable bits must read back as 1 after writing 1s. */
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4543
4544static int
4545bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4546{
f71e1309 4547 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4548 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4549 int i;
4550
4551 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4552 u32 offset;
4553
4554 for (offset = 0; offset < size; offset += 4) {
4555
4556 REG_WR_IND(bp, start + offset, test_pattern[i]);
4557
4558 if (REG_RD_IND(bp, start + offset) !=
4559 test_pattern[i]) {
4560 return -ENODEV;
4561 }
4562 }
4563 }
4564 return 0;
4565}
4566
/* Ethtool self-test: pattern-test several on-chip memory regions via
 * bnx2_do_mem_test().  The region table differs between the 5709 and
 * earlier chips (the 5709 list omits the 0x160000 region).  Returns 0
 * on success or -ENODEV from the first failing region.
 */
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },	/* sentinel */
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },	/* sentinel */
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
4608
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Send one 1514-byte test frame through the selected loopback path
 * (MAC-internal or PHY) and verify it comes back intact on the RX ring.
 *
 * The frame is built with the device MAC address as destination and a
 * deterministic byte pattern as payload, queued directly on the TX
 * ring, and the host coalescing block is kicked (without raising an
 * interrupt) to update the status block.  Returns 0 if the frame was
 * transmitted, received without error flags, and the payload matches;
 * -EINVAL for an unknown mode, -ENOMEM if the skb allocation fails,
 * -ENODEV on any data/completion mismatch.
 *
 * Caller is expected to have quiesced normal traffic (self-test path).
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: dst MAC = own address, zeroed src/type
	 * area, then a repeating (i & 0xff) payload pattern.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update (no interrupt) so we can sample
	 * the RX consumer index before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Fill one TX buffer descriptor for the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Kick coalescing again so completion indices are published. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed exactly up to our producer index. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on RX. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The L2 frame header written by the chip precedes the data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any error bit in the frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: pkt_len includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4730
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

/* Ethtool self-test: run both MAC and PHY loopback tests after a fresh
 * NIC reset and PHY init.  Returns a bitmask of the failed modes (0 if
 * both passed); if the interface is not running, both are reported as
 * failed without touching the hardware.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
4754
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

/* Ethtool self-test: sanity-check the NVRAM contents.  Verifies the
 * magic value at offset 0, then reads two 0x100-byte blocks starting
 * at offset 0x100 and checks that each yields the standard CRC32
 * residual (i.e. the blocks carry a valid trailing CRC).  Returns 0 on
 * success, -ENODEV on a bad magic or checksum, or the error from the
 * NVRAM read itself.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
4792
/* Ethtool self-test: report link state from the PHY's BMSR.  The
 * register is read twice because the link-status bit is latched; the
 * second read reflects the current state.  Returns 0 if link is up,
 * -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
4810
/* Verify that interrupts are being delivered: sample the status index,
 * force an immediate coalescing event, then poll up to ~100 ms for the
 * index to change.  Used after enabling MSI to detect broken MSI
 * delivery.  Returns 0 if an interrupt was observed, -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	/* Poll 10 times, 10 ms apart, for the index to advance. */
	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4840
/* Periodic SerDes link maintenance for 5706 chips, called from
 * bnx2_timer().
 *
 * When autoneg is enabled but the link stays down, probe PHY shadow
 * registers (0x1c / 0x17 / 0x15 — presumably signal-detect and config
 * status; exact semantics are Broadcom PHY specific, not shown here)
 * and, if a signal is present without autoneg config exchange, fall
 * back to forced 1000/full ("parallel detect").  Conversely, when the
 * link is up in parallel-detect mode and the partner starts sending
 * config, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previous autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Read shadow registers; each pair of writes/reads
			 * selects then fetches the value (latched, hence
			 * the double read of 0x15).
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1000 full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up via parallel detect; if the partner now
		 * sends CONFIG, go back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
b6016b76 4895
/* Periodic SerDes link maintenance for 5708 chips, called from
 * bnx2_timer().  Only relevant for 2.5G-capable PHYs that are locally
 * managed (skipped entirely when a remote PHY manages the link or the
 * PHY is not 2.5G capable).
 *
 * While autoneg is on and the link stays down, alternate between
 * forced 2.5G mode and autoneg to try to establish a link with a
 * non-negotiating partner.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed so far: try forced 2.5G with a
			 * shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too: back to autoneg and
			 * give it two timer ticks before re-probing.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4928
/* Driver heartbeat timer.  Sends the keep-alive pulse to the firmware,
 * refreshes the firmware RX-drop counter, works around a 5708 stats
 * corruption issue, and runs the per-chip SerDes link maintenance.
 * Re-arms itself with bp->current_interval.  Skips all work (but still
 * re-arms) while interrupts are disabled via bp->intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem != 0 means a reset is in progress; don't touch HW. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Firmware keep-alive pulse. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4961
8e6a72c4
MC
4962static int
4963bnx2_request_irq(struct bnx2 *bp)
4964{
4965 struct net_device *dev = bp->dev;
4966 int rc = 0;
4967
4968 if (bp->flags & USING_MSI_FLAG) {
4969 irq_handler_t fn = bnx2_msi;
4970
4971 if (bp->flags & ONE_SHOT_MSI_FLAG)
4972 fn = bnx2_msi_1shot;
4973
4974 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4975 } else
4976 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4977 IRQF_SHARED, dev->name, dev);
4978 return rc;
4979}
4980
4981static void
4982bnx2_free_irq(struct bnx2 *bp)
4983{
4984 struct net_device *dev = bp->dev;
4985
4986 if (bp->flags & USING_MSI_FLAG) {
4987 free_irq(bp->pdev->irq, dev);
4988 pci_disable_msi(bp->pdev);
4989 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4990 } else
4991 free_irq(bp->pdev->irq, dev);
4992}
4993
/* Called with rtnl_lock */
/* net_device open handler: power up, allocate rings/memory, set up the
 * IRQ (preferring MSI when available), initialize the NIC, and start
 * the queue.  If MSI was enabled but the interrupt self-test fails
 * (broken chipset MSI routing), the device is re-initialized in INTx
 * mode.  Returns 0 on success or a negative errno, with all acquired
 * resources released on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the chip supports it and it isn't disabled
	 * by module parameter; 5709 additionally uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-init the NIC from scratch in INTx mode. */
			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5075
/* Workqueue handler that recovers the device after a TX timeout: stop
 * the netif, re-initialize the NIC, and restart.  in_reset_task guards
 * bnx2_close() against tearing the device down mid-reset (see the
 * comment there).  intr_sem is set to 1 so bnx2_netif_start() will
 * re-enable interrupts through the normal path.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5093
/* net_device TX watchdog handler: defer the recovery to a workqueue so
 * the netif can be shut down gracefully before the reset.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5102
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach/detach the VLAN group.  The netif is quiesced while the RX
 * mode is reprogrammed so the hardware filter matches the new group.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5118
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* net_device hard_start_xmit: map the skb (head + page fragments) onto
 * TX buffer descriptors, encode checksum-offload, VLAN and TSO flags
 * into the descriptor, and ring the TX doorbell.  Returns NETDEV_TX_OK
 * (the skb is consumed or queued) or NETDEV_TX_BUSY if the ring is
 * unexpectedly full.  Stops the queue when the ring runs low, waking it
 * again only above tx_wake_thresh to avoid ping-ponging.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled;
	 * hitting this indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* TSO: encode header lengths/offsets for the chip. */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: the TCP header offset relative to a
			 * standard ipv6hdr is packed into descriptor
			 * flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: headers are rewritten below, so a
			 * cloned header block must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the pseudo-header checksum for the chip. */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First descriptor covers the linear head. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: publish the new producer index and byte
	 * sequence to the chip.
	 */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Re-check under the stopped queue to close the race
		 * with a concurrent bnx2_tx_int() completion.
		 */
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5257
/* Called with rtnl_lock */
/* net_device stop handler: wait out any in-flight reset task, quiesce
 * the device, reset the chip with a code reflecting the wake-on-LAN
 * configuration, release IRQ/skbs/memory, and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down (WoL capability). */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5289
/* Combine the hi/lo halves of a 64-bit hardware counter.  On 32-bit
 * kernels only the low word is used, since net_device_stats fields are
 * unsigned long.
 */
#define GET_NET_STATS64(ctr)				\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif

/* net_device get_stats handler: translate the DMA'd hardware statistics
 * block into net_device_stats.  Returns the (possibly zeroed) stats
 * structure even when the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported reliably on 5706 and
	 * 5708 A0 silicon.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5378
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings handler: report supported/advertised link modes,
 * the current port type, autoneg state, and — when the carrier is up —
 * the negotiated speed/duplex (-1/-1 when link is down).  A remote-PHY
 * capable device reports both fibre and copper support.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link fields against the timer/irq paths. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5439
/* ethtool set_settings handler: validate and apply the requested port,
 * autoneg mode, advertised modes, and (for forced mode) speed/duplex,
 * then reprogram the PHY via bnx2_setup_phy().  Port changes are only
 * accepted on remote-PHY capable devices; 2.5G settings require the
 * 2.5G-capable flag and a fibre port.  Returns 0 on success or -EINVAL
 * for an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices can switch port types. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes make no sense on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single mode selected: advertise everything
			 * the port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5522
/* ethtool ->get_drvinfo: report driver name/version, PCI bus info and
 * the bootcode version packed as three bytes in bp->fw_ver.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	/* NOTE(review): renders "X.Y.Z" one digit per component; a
	 * component value >= 10 would produce a non-digit character —
	 * presumably bootcode version bytes stay in 0-9, confirm.
	 */
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
5537
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool ->get_regs_len: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5545
5546static void
5547bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5548{
5549 u32 *p = _p, i, offset;
5550 u8 *orig_p = _p;
5551 struct bnx2 *bp = netdev_priv(dev);
5552 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5553 0x0800, 0x0880, 0x0c00, 0x0c10,
5554 0x0c30, 0x0d08, 0x1000, 0x101c,
5555 0x1040, 0x1048, 0x1080, 0x10a4,
5556 0x1400, 0x1490, 0x1498, 0x14f0,
5557 0x1500, 0x155c, 0x1580, 0x15dc,
5558 0x1600, 0x1658, 0x1680, 0x16d8,
5559 0x1800, 0x1820, 0x1840, 0x1854,
5560 0x1880, 0x1894, 0x1900, 0x1984,
5561 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5562 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5563 0x2000, 0x2030, 0x23c0, 0x2400,
5564 0x2800, 0x2820, 0x2830, 0x2850,
5565 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5566 0x3c00, 0x3c94, 0x4000, 0x4010,
5567 0x4080, 0x4090, 0x43c0, 0x4458,
5568 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5569 0x4fc0, 0x5010, 0x53c0, 0x5444,
5570 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5571 0x5fc0, 0x6000, 0x6400, 0x6428,
5572 0x6800, 0x6848, 0x684c, 0x6860,
5573 0x6888, 0x6910, 0x8000 };
5574
5575 regs->version = 0;
5576
5577 memset(p, 0, BNX2_REGDUMP_LEN);
5578
5579 if (!netif_running(bp->dev))
5580 return;
5581
5582 i = 0;
5583 offset = reg_boundaries[0];
5584 p += offset;
5585 while (offset < BNX2_REGDUMP_LEN) {
5586 *p++ = REG_RD(bp, offset);
5587 offset += 4;
5588 if (offset == reg_boundaries[i + 1]) {
5589 offset = reg_boundaries[i + 2];
5590 p = (u32 *) (orig_p + offset);
5591 i += 2;
5592 }
5593 }
5594}
5595
b6016b76
MC
5596static void
5597bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5598{
972ec0d4 5599 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5600
5601 if (bp->flags & NO_WOL_FLAG) {
5602 wol->supported = 0;
5603 wol->wolopts = 0;
5604 }
5605 else {
5606 wol->supported = WAKE_MAGIC;
5607 if (bp->wol)
5608 wol->wolopts = WAKE_MAGIC;
5609 else
5610 wol->wolopts = 0;
5611 }
5612 memset(&wol->sopass, 0, sizeof(wol->sopass));
5613}
5614
5615static int
5616bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5617{
972ec0d4 5618 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5619
5620 if (wol->wolopts & ~WAKE_MAGIC)
5621 return -EINVAL;
5622
5623 if (wol->wolopts & WAKE_MAGIC) {
5624 if (bp->flags & NO_WOL_FLAG)
5625 return -EINVAL;
5626
5627 bp->wol = 1;
5628 }
5629 else {
5630 bp->wol = 0;
5631 }
5632 return 0;
5633}
5634
/* ethtool ->nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  On SerDes, first force the link down briefly so
 * the peer notices the restart.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the management firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; reacquire before touching
		 * the PHY again.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg watchdog timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a new autonegotiation cycle. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5677
5678static int
5679bnx2_get_eeprom_len(struct net_device *dev)
5680{
972ec0d4 5681 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5682
1122db71 5683 if (bp->flash_info == NULL)
b6016b76
MC
5684 return 0;
5685
1122db71 5686 return (int) bp->flash_size;
b6016b76
MC
5687}
5688
5689static int
5690bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5691 u8 *eebuf)
5692{
972ec0d4 5693 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5694 int rc;
5695
1064e944 5696 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5697
5698 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5699
5700 return rc;
5701}
5702
5703static int
5704bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5705 u8 *eebuf)
5706{
972ec0d4 5707 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5708 int rc;
5709
1064e944 5710 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5711
5712 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5713
5714 return rc;
5715}
5716
5717static int
5718bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5719{
972ec0d4 5720 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5721
5722 memset(coal, 0, sizeof(struct ethtool_coalesce));
5723
5724 coal->rx_coalesce_usecs = bp->rx_ticks;
5725 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5726 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5727 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5728
5729 coal->tx_coalesce_usecs = bp->tx_ticks;
5730 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5731 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5732 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5733
5734 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5735
5736 return 0;
5737}
5738
/* ethtool ->set_coalesce: clamp and store the coalescing parameters
 * (ticks capped at 0x3ff, frame counts at 0xff), then restart the NIC
 * if it is running so the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 restriction: stats period must be 0 or exactly 1 second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	/* Hardware uses only the upper bits of the stats period. */
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5786
5787static void
5788bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5789{
972ec0d4 5790 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5791
13daffa2 5792 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5793 ering->rx_mini_max_pending = 0;
5794 ering->rx_jumbo_max_pending = 0;
5795
5796 ering->rx_pending = bp->rx_ring_size;
5797 ering->rx_mini_pending = 0;
5798 ering->rx_jumbo_pending = 0;
5799
5800 ering->tx_max_pending = MAX_TX_DESC_CNT;
5801 ering->tx_pending = bp->tx_ring_size;
5802}
5803
/* ethtool ->set_ringparam: resize the RX/TX rings.  If the NIC is up,
 * it is torn down, the rings are reallocated, and it is brought back up.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX ring must hold more than a maximally-fragmented skb. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the interface is left
		 * stopped with no rings — caller must down/up to recover.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5837
5838static void
5839bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5840{
972ec0d4 5841 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5842
5843 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5844 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5845 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5846}
5847
/* ethtool ->set_pauseparam: record requested flow-control settings and
 * reprogram the PHY under phy_lock.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5874
5875static u32
5876bnx2_get_rx_csum(struct net_device *dev)
5877{
972ec0d4 5878 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5879
5880 return bp->rx_csum;
5881}
5882
5883static int
5884bnx2_set_rx_csum(struct net_device *dev, u32 data)
5885{
972ec0d4 5886 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5887
5888 bp->rx_csum = data;
5889 return 0;
5890}
5891
/* ethtool ->set_tso: toggle TCP segmentation offload feature bits.
 * TSO6 is only offered on the 5709, which supports IPv6 offload.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
5906
/* Number of ethtool statistics; must match the three parallel arrays
 * below (names, hardware offsets, per-chip counter widths).
 */
#define BNX2_NUM_STATS 46

/* Statistic names reported to ethtool, in the same order as
 * bnx2_stats_offset_arr.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5959
/* Convert a statistics_block field name into its u32-word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter inside the hardware statistics block,
 * parallel to bnx2_stats_str_arr.  64-bit counters point at their
 * high word; the low word follows at offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6010
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Counter width in bytes (8 or 4) for 5706 and early 5708 silicon;
 * 0 means the counter is unreliable and is reported as zero.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6021
/* Counter widths for later chips; only stat_IfHCInBadOctets (index 1)
 * remains skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6029
b6016b76
MC
/* Number of ethtool self-test entries; indexes into the buf[] filled
 * by bnx2_self_test() and the names below.
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6042
/* ethtool ->self_test_count: fixed number of self-test results. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
6048
/* ethtool ->self_test: run diagnostics.  Offline tests (register,
 * memory, loopback) reset the chip and require reinitializing the NIC
 * afterwards; online tests (nvram, interrupt, link) always run.
 * buf[i] is nonzero for each failed test, matching bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the NIC down and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the chip: full reinit if the interface is up,
		 * otherwise just a plain reset.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6104
6105static void
6106bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107{
6108 switch (stringset) {
6109 case ETH_SS_STATS:
6110 memcpy(buf, bnx2_stats_str_arr,
6111 sizeof(bnx2_stats_str_arr));
6112 break;
6113 case ETH_SS_TEST:
6114 memcpy(buf, bnx2_tests_str_arr,
6115 sizeof(bnx2_tests_str_arr));
6116 break;
6117 }
6118}
6119
/* ethtool ->get_stats_count: fixed number of statistics. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
6125
/* ethtool ->get_ethtool_stats: copy counters out of the DMA'd hardware
 * statistics block, widening each to u64.  A per-chip width table marks
 * 4-byte counters, 8-byte counters, and errata counters to skip.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early silicon has extra unreliable counters (see errata note
	 * above bnx2_5706_stats_len_arr).
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: offset points at the high word. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6166
/* ethtool ->phys_id: blink the port LED 'data' times (default 2, one
 * second per blink) to identify the adapter, then restore the original
 * LED configuration.  Interruptible by a pending signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate LED on (even i) / LED off-overrides (odd i). */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6200
4666f87a
MC
6201static int
6202bnx2_set_tx_csum(struct net_device *dev, u32 data)
6203{
6204 struct bnx2 *bp = netdev_priv(dev);
6205
6206 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6207 return (ethtool_op_set_tx_hw_csum(dev, data));
6208 else
6209 return (ethtool_op_set_tx_csum(dev, data));
6210}
6211
/* ethtool operations table registered in dev->ethtool_ops; generic
 * ethtool_op_* helpers are used where no chip-specific work is needed.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
6247
/* Called with rtnl_lock */
/* MII ioctl handler: SIOCGMIIPHY reports the PHY address; SIOCGMIIREG /
 * SIOCSMIIREG read/write a PHY register under phy_lock.  Not supported
 * when the PHY is owned by remote management firmware.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		/* MDIO access requires the chip to be up. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6301
6302/* Called with rtnl_lock */
6303static int
6304bnx2_change_mac_addr(struct net_device *dev, void *p)
6305{
6306 struct sockaddr *addr = p;
972ec0d4 6307 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6308
73eef4cd
MC
6309 if (!is_valid_ether_addr(addr->sa_data))
6310 return -EINVAL;
6311
b6016b76
MC
6312 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6313 if (netif_running(dev))
6314 bnx2_set_mac_addr(bp);
6315
6316 return 0;
6317}
6318
6319/* Called with rtnl_lock */
6320static int
6321bnx2_change_mtu(struct net_device *dev, int new_mtu)
6322{
972ec0d4 6323 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6324
6325 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6326 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6327 return -EINVAL;
6328
6329 dev->mtu = new_mtu;
6330 if (netif_running(dev)) {
6331 bnx2_netif_stop(bp);
6332
6333 bnx2_init_nic(bp);
6334
6335 bnx2_netif_start(bp);
6336 }
6337 return 0;
6338}
6339
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the device
 * IRQ masked, for netconsole/kgdboe style polling.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6351
/* Probe-time detection of the 5709 media type: the bond id pins decide
 * copper (C) or SerDes (S) outright; otherwise the strap value selects
 * media, with different strap-to-media mappings for PCI function 0 and
 * function 1.  Sets PHY_SERDES_FLAG for SerDes ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* copper bond: leave flags unchanged */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Software override takes precedence over the hardware strap. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6389
/* Probe-time detection of the PCI/PCI-X bus mode, clock speed and
 * width from the chip's misc status and clock control registers.
 * Sets PCIX_FLAG / PCI_32BIT_FLAG and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6441
b6016b76
MC
6442static int __devinit
6443bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6444{
6445 struct bnx2 *bp;
6446 unsigned long mem_len;
6447 int rc;
6448 u32 reg;
40453c83 6449 u64 dma_mask, persist_dma_mask;
b6016b76
MC
6450
6451 SET_MODULE_OWNER(dev);
6452 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6453 bp = netdev_priv(dev);
b6016b76
MC
6454
6455 bp->flags = 0;
6456 bp->phy_flags = 0;
6457
6458 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6459 rc = pci_enable_device(pdev);
6460 if (rc) {
9b91cf9d 6461 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
6462 goto err_out;
6463 }
6464
6465 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6466 dev_err(&pdev->dev,
2e8a538d 6467 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6468 rc = -ENODEV;
6469 goto err_out_disable;
6470 }
6471
6472 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6473 if (rc) {
9b91cf9d 6474 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6475 goto err_out_disable;
6476 }
6477
6478 pci_set_master(pdev);
6479
6480 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6481 if (bp->pm_cap == 0) {
9b91cf9d 6482 dev_err(&pdev->dev,
2e8a538d 6483 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6484 rc = -EIO;
6485 goto err_out_release;
6486 }
6487
b6016b76
MC
6488 bp->dev = dev;
6489 bp->pdev = pdev;
6490
6491 spin_lock_init(&bp->phy_lock);
1b8227c4 6492 spin_lock_init(&bp->indirect_lock);
c4028958 6493 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6494
6495 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6496 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6497 dev->mem_end = dev->mem_start + mem_len;
6498 dev->irq = pdev->irq;
6499
6500 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6501
6502 if (!bp->regview) {
9b91cf9d 6503 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6504 rc = -ENOMEM;
6505 goto err_out_release;
6506 }
6507
6508 /* Configure byte swap and enable write to the reg_window registers.
6509 * Rely on CPU to do target byte swapping on big endian systems
6510 * The chip's target access swapping will not swap all accesses
6511 */
6512 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6513 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6514 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6515
829ca9a3 6516 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6517
6518 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6519
883e5151
MC
6520 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6521 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6522 dev_err(&pdev->dev,
6523 "Cannot find PCIE capability, aborting.\n");
6524 rc = -EIO;
6525 goto err_out_unmap;
6526 }
6527 bp->flags |= PCIE_FLAG;
6528 } else {
59b47d8a
MC
6529 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6530 if (bp->pcix_cap == 0) {
6531 dev_err(&pdev->dev,
6532 "Cannot find PCIX capability, aborting.\n");
6533 rc = -EIO;
6534 goto err_out_unmap;
6535 }
6536 }
6537
8e6a72c4
MC
6538 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6539 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6540 bp->flags |= MSI_CAP_FLAG;
6541 }
6542
40453c83
MC
6543 /* 5708 cannot support DMA addresses > 40-bit. */
6544 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6545 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6546 else
6547 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6548
6549 /* Configure DMA attributes. */
6550 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6551 dev->features |= NETIF_F_HIGHDMA;
6552 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6553 if (rc) {
6554 dev_err(&pdev->dev,
6555 "pci_set_consistent_dma_mask failed, aborting.\n");
6556 goto err_out_unmap;
6557 }
6558 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6559 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6560 goto err_out_unmap;
6561 }
6562
883e5151
MC
6563 if (!(bp->flags & PCIE_FLAG))
6564 bnx2_get_pci_speed(bp);
b6016b76
MC
6565
6566 /* 5706A0 may falsely detect SERR and PERR. */
6567 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6568 reg = REG_RD(bp, PCI_COMMAND);
6569 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6570 REG_WR(bp, PCI_COMMAND, reg);
6571 }
6572 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6573 !(bp->flags & PCIX_FLAG)) {
6574
9b91cf9d 6575 dev_err(&pdev->dev,
2e8a538d 6576 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6577 goto err_out_unmap;
6578 }
6579
6580 bnx2_init_nvram(bp);
6581
e3648b3d
MC
6582 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6583
6584 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6585 BNX2_SHM_HDR_SIGNATURE_SIG) {
6586 u32 off = PCI_FUNC(pdev->devfn) << 2;
6587
6588 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6589 } else
e3648b3d
MC
6590 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6591
b6016b76
MC
6592 /* Get the permanent MAC address. First we need to make sure the
6593 * firmware is actually running.
6594 */
e3648b3d 6595 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6596
6597 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6598 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6599 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6600 rc = -ENODEV;
6601 goto err_out_unmap;
6602 }
6603
e3648b3d 6604 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 6605
e3648b3d 6606 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6607 bp->mac_addr[0] = (u8) (reg >> 8);
6608 bp->mac_addr[1] = (u8) reg;
6609
e3648b3d 6610 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6611 bp->mac_addr[2] = (u8) (reg >> 24);
6612 bp->mac_addr[3] = (u8) (reg >> 16);
6613 bp->mac_addr[4] = (u8) (reg >> 8);
6614 bp->mac_addr[5] = (u8) reg;
6615
6616 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6617 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6618
6619 bp->rx_csum = 1;
6620
6621 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6622
6623 bp->tx_quick_cons_trip_int = 20;
6624 bp->tx_quick_cons_trip = 20;
6625 bp->tx_ticks_int = 80;
6626 bp->tx_ticks = 80;
6aa20a22 6627
b6016b76
MC
6628 bp->rx_quick_cons_trip_int = 6;
6629 bp->rx_quick_cons_trip = 6;
6630 bp->rx_ticks_int = 18;
6631 bp->rx_ticks = 18;
6632
6633 bp->stats_ticks = 1000000 & 0xffff00;
6634
6635 bp->timer_interval = HZ;
cd339a0e 6636 bp->current_interval = HZ;
b6016b76 6637
5b0c76ad
MC
6638 bp->phy_addr = 1;
6639
b6016b76 6640 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6641 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6642 bnx2_get_5709_media(bp);
6643 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6644 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6645
0d8a6571 6646 bp->phy_port = PORT_TP;
bac0dff6 6647 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6648 bp->phy_port = PORT_FIBRE;
b6016b76 6649 bp->flags |= NO_WOL_FLAG;
bac0dff6 6650 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6651 bp->phy_addr = 2;
e3648b3d 6652 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
6653 BNX2_SHARED_HW_CFG_CONFIG);
6654 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6655 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6656 }
0d8a6571
MC
6657 bnx2_init_remote_phy(bp);
6658
261dd5ca
MC
6659 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6660 CHIP_NUM(bp) == CHIP_NUM_5708)
6661 bp->phy_flags |= PHY_CRC_FIX_FLAG;
b659f44e
MC
6662 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6663 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6664
16088272
MC
6665 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6666 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6667 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
6668 bp->flags |= NO_WOL_FLAG;
6669
b6016b76
MC
6670 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6671 bp->tx_quick_cons_trip_int =
6672 bp->tx_quick_cons_trip;
6673 bp->tx_ticks_int = bp->tx_ticks;
6674 bp->rx_quick_cons_trip_int =
6675 bp->rx_quick_cons_trip;
6676 bp->rx_ticks_int = bp->rx_ticks;
6677 bp->comp_prod_trip_int = bp->comp_prod_trip;
6678 bp->com_ticks_int = bp->com_ticks;
6679 bp->cmd_ticks_int = bp->cmd_ticks;
6680 }
6681
f9317a40
MC
6682 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6683 *
6684 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6685 * with byte enables disabled on the unused 32-bit word. This is legal
6686 * but causes problems on the AMD 8132 which will eventually stop
6687 * responding after a while.
6688 *
6689 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6690 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6691 */
6692 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6693 struct pci_dev *amd_8132 = NULL;
6694
6695 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6696 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6697 amd_8132))) {
6698 u8 rev;
6699
6700 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6701 if (rev >= 0x10 && rev <= 0x13) {
6702 disable_msi = 1;
6703 pci_dev_put(amd_8132);
6704 break;
6705 }
6706 }
6707 }
6708
deaf391b 6709 bnx2_set_default_link(bp);
b6016b76
MC
6710 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6711
cd339a0e
MC
6712 init_timer(&bp->timer);
6713 bp->timer.expires = RUN_AT(bp->timer_interval);
6714 bp->timer.data = (unsigned long) bp;
6715 bp->timer.function = bnx2_timer;
6716
b6016b76
MC
6717 return 0;
6718
6719err_out_unmap:
6720 if (bp->regview) {
6721 iounmap(bp->regview);
73eef4cd 6722 bp->regview = NULL;
b6016b76
MC
6723 }
6724
6725err_out_release:
6726 pci_release_regions(pdev);
6727
6728err_out_disable:
6729 pci_disable_device(pdev);
6730 pci_set_drvdata(pdev, NULL);
6731
6732err_out:
6733 return rc;
6734}
6735
883e5151
MC
6736static char * __devinit
6737bnx2_bus_string(struct bnx2 *bp, char *str)
6738{
6739 char *s = str;
6740
6741 if (bp->flags & PCIE_FLAG) {
6742 s += sprintf(s, "PCI Express");
6743 } else {
6744 s += sprintf(s, "PCI");
6745 if (bp->flags & PCIX_FLAG)
6746 s += sprintf(s, "-X");
6747 if (bp->flags & PCI_32BIT_FLAG)
6748 s += sprintf(s, " 32-bit");
6749 else
6750 s += sprintf(s, " 64-bit");
6751 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6752 }
6753 return str;
6754}
6755
b6016b76
MC
/* PCI probe entry point.  Allocates the net_device (with the bnx2
 * private area embedded), runs board/chip initialization, wires up
 * the net_device operations and offload feature flags, and finally
 * registers the interface with the networking core.
 *
 * Returns 0 on success or a negative errno; every failure path
 * releases whatever was acquired up to that point.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	/* Print the driver version banner once, on the first probe. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev (including the bnx2 private area) is zeroed by the allocator */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Maps the register window, detects chip/bus capabilities and
	 * reads the permanent MAC address from firmware shared memory.
	 * On failure it has already released the PCI resources it took.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* net_device entry points. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;	/* NAPI poll weight */

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from firmware shmem by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Offload capabilities; the IPv6 variants exist only on 5709. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	/* Registration comes last: the interface may be opened as soon
	 * as this succeeds.  On failure, unwind everything done by
	 * bnx2_init_board() as well.
	 */
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',	/* chip rev letter */
		((CHIP_ID(bp) & 0x0ff0) >> 4),		/* chip rev number */
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	/* Continue the same log line with the station address. */
	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6848
6849static void __devexit
6850bnx2_remove_one(struct pci_dev *pdev)
6851{
6852 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 6853 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6854
afdc08b9
MC
6855 flush_scheduled_work();
6856
b6016b76
MC
6857 unregister_netdev(dev);
6858
6859 if (bp->regview)
6860 iounmap(bp->regview);
6861
6862 free_netdev(dev);
6863 pci_release_regions(pdev);
6864 pci_disable_device(pdev);
6865 pci_set_drvdata(pdev, NULL);
6866}
6867
6868static int
829ca9a3 6869bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
6870{
6871 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 6872 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6873 u32 reset_code;
6874
6875 if (!netif_running(dev))
6876 return 0;
6877
1d60290f 6878 flush_scheduled_work();
b6016b76
MC
6879 bnx2_netif_stop(bp);
6880 netif_device_detach(dev);
6881 del_timer_sync(&bp->timer);
dda1e390 6882 if (bp->flags & NO_WOL_FLAG)
6c4f095e 6883 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 6884 else if (bp->wol)
b6016b76
MC
6885 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6886 else
6887 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6888 bnx2_reset_chip(bp, reset_code);
6889 bnx2_free_skbs(bp);
30c517b2 6890 pci_save_state(pdev);
829ca9a3 6891 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
6892 return 0;
6893}
6894
6895static int
6896bnx2_resume(struct pci_dev *pdev)
6897{
6898 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 6899 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6900
6901 if (!netif_running(dev))
6902 return 0;
6903
30c517b2 6904 pci_restore_state(pdev);
829ca9a3 6905 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6906 netif_device_attach(dev);
6907 bnx2_init_nic(bp);
6908 bnx2_netif_start(bp);
6909 return 0;
6910}
6911
/* PCI driver glue: entry points the PCI core invokes for device
 * probe/removal and for system suspend/resume transitions.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6920
6921static int __init bnx2_init(void)
6922{
29917620 6923 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
6924}
6925
/* Module unload: detach from the PCI core, which removes every bound
 * device via bnx2_remove_one().
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6930
/* Register the module's load/unload entry points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6933
6934
6935