[BNX2]: Add indirect spinlock.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
206cc83c 3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052
MC
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76
MC
54
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
68c9f75a
MC
57#define DRV_MODULE_VERSION "1.5.8"
58#define DRV_MODULE_RELDATE "April 24, 2007"
b6016b76
MC
59
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
e19360f2 65static const char version[] __devinitdata =
b6016b76
MC
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 69MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
70MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
5b0c76ad
MC
84 BCM5708,
85 BCM5708S,
bac0dff6 86 BCM5709,
27a005b8 87 BCM5709S,
b6016b76
MC
88} board_t;
89
90/* indexed by board_t, above */
f71e1309 91static const struct {
b6016b76
MC
92 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
37137709 130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
b6016b76
MC
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
37137709
MC
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76 136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
b6016b76
MC
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
37137709 141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
37137709 147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
37137709
MC
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
37137709
MC
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
b6016b76
MC
212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
e89bbf10
MC
216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
2f8af120 218 u32 diff;
e89bbf10 219
2f8af120 220 smp_mb();
faac9c4b
MC
221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
e89bbf10
MC
231 return (bp->tx_ring_size - diff);
232}
233
b6016b76
MC
234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
1b8227c4
MC
237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
b6016b76 240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
b6016b76
MC
244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
1b8227c4 249 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 252 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
253}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
1b8227c4 259 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
1b8227c4 277 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
278}
279
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 357
b6016b76
MC
358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
1269a8a6
MC
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
b6016b76
MC
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
bf5295bb 404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
13daffa2
MC
441 int i;
442
59b47d8a
MC
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
b6016b76 451 if (bp->status_blk) {
0f31f994 452 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
0f31f994 455 bp->stats_blk = NULL;
b6016b76
MC
456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
b4558ea9
JJ
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
13daffa2
MC
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
472 }
473 vfree(bp->rx_buf_ring);
b4558ea9 474 bp->rx_buf_ring = NULL;
b6016b76
MC
475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
0f31f994 480 int i, status_blk_size;
13daffa2 481
0f31f994
MC
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
b6016b76
MC
484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
b6016b76
MC
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
13daffa2
MC
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
b6016b76
MC
496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
13daffa2
MC
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
b6016b76 511
0f31f994
MC
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
0f31f994 522 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 523
0f31f994
MC
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
b6016b76 526
0f31f994 527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 528
59b47d8a
MC
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
b6016b76
MC
541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
547
e3648b3d
MC
548static void
549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
553 if (bp->link_up) {
554 u32 bmsr;
555
556 switch (bp->line_speed) {
557 case SPEED_10:
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
560 else
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
562 break;
563 case SPEED_100:
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
566 else
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
568 break;
569 case SPEED_1000:
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
572 else
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
574 break;
575 case SPEED_2500:
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
578 else
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
580 break;
581 }
582
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
584
585 if (bp->autoneg) {
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
587
ca58c3af
MC
588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
590
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
594 else
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
596 }
597 }
598 else
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
600
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
602}
603
b6016b76
MC
604static void
605bnx2_report_link(struct bnx2 *bp)
606{
607 if (bp->link_up) {
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
610
611 printk("%d Mbps ", bp->line_speed);
612
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
615 else
616 printk("half duplex");
617
618 if (bp->flow_ctrl) {
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
623 }
624 else {
625 printk(", transmit ");
626 }
627 printk("flow control ON");
628 }
629 printk("\n");
630 }
631 else {
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
634 }
e3648b3d
MC
635
636 bnx2_report_fw_link(bp);
b6016b76
MC
637}
638
639static void
640bnx2_resolve_flow_ctrl(struct bnx2 *bp)
641{
642 u32 local_adv, remote_adv;
643
644 bp->flow_ctrl = 0;
6aa20a22 645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
647
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
650 }
651 return;
652 }
653
654 if (bp->duplex != DUPLEX_FULL) {
655 return;
656 }
657
5b0c76ad
MC
658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
660 u32 val;
661
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
667 return;
668 }
669
ca58c3af
MC
670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
672
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
676
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
685
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
688 }
689
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
695 }
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
698 }
699 }
700 else {
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
703 }
704 }
705 }
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
709
710 bp->flow_ctrl = FLOW_CTRL_TX;
711 }
712 }
713}
714
27a005b8
MC
715static int
716bnx2_5709s_linkup(struct bnx2 *bp)
717{
718 u32 val, speed;
719
720 bp->link_up = 1;
721
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
725
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
729 return 0;
730 }
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
732 switch (speed) {
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
735 break;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
738 break;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
742 break;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
745 break;
746 }
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
749 else
750 bp->duplex = DUPLEX_HALF;
751 return 0;
752}
753
b6016b76 754static int
5b0c76ad
MC
755bnx2_5708s_linkup(struct bnx2 *bp)
756{
757 u32 val;
758
759 bp->link_up = 1;
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
764 break;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
767 break;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
770 break;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
773 break;
774 }
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
777 else
778 bp->duplex = DUPLEX_HALF;
779
780 return 0;
781}
782
783static int
784bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
785{
786 u32 bmcr, local_adv, remote_adv, common;
787
788 bp->link_up = 1;
789 bp->line_speed = SPEED_1000;
790
ca58c3af 791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
794 }
795 else {
796 bp->duplex = DUPLEX_HALF;
797 }
798
799 if (!(bmcr & BMCR_ANENABLE)) {
800 return 0;
801 }
802
ca58c3af
MC
803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
805
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
808
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
811 }
812 else {
813 bp->duplex = DUPLEX_HALF;
814 }
815 }
816
817 return 0;
818}
819
820static int
821bnx2_copper_linkup(struct bnx2 *bp)
822{
823 u32 bmcr;
824
ca58c3af 825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
828
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
831
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
836 }
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
840 }
841 else {
ca58c3af
MC
842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
844
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
849 }
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
853 }
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
857 }
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
861 }
862 else {
863 bp->line_speed = 0;
864 bp->link_up = 0;
865 }
866 }
867 }
868 else {
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
871 }
872 else {
873 bp->line_speed = SPEED_10;
874 }
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
877 }
878 else {
879 bp->duplex = DUPLEX_HALF;
880 }
881 }
882
883 return 0;
884}
885
886static int
887bnx2_set_mac_link(struct bnx2 *bp)
888{
889 u32 val;
890
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
895 }
896
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
899
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 902 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
903
904 if (bp->link_up) {
5b0c76ad
MC
905 switch (bp->line_speed) {
906 case SPEED_10:
59b47d8a
MC
907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
909 break;
910 }
911 /* fall through */
912 case SPEED_100:
913 val |= BNX2_EMAC_MODE_PORT_MII;
914 break;
915 case SPEED_2500:
59b47d8a 916 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
917 /* fall through */
918 case SPEED_1000:
919 val |= BNX2_EMAC_MODE_PORT_GMII;
920 break;
921 }
b6016b76
MC
922 }
923 else {
924 val |= BNX2_EMAC_MODE_PORT_GMII;
925 }
926
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
931
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
934
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
938
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
942
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
946
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
949
950 return 0;
951}
952
27a005b8
MC
953static void
954bnx2_enable_bmsr1(struct bnx2 *bp)
955{
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
960}
961
962static void
963bnx2_disable_bmsr1(struct bnx2 *bp)
964{
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
969}
970
605a9e20
MC
971static int
972bnx2_test_and_enable_2g5(struct bnx2 *bp)
973{
974 u32 up1;
975 int ret = 1;
976
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
978 return 0;
979
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
982
27a005b8
MC
983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
985
605a9e20
MC
986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
990 ret = 0;
991 }
992
27a005b8
MC
993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
996
605a9e20
MC
997 return ret;
998}
999
1000static int
1001bnx2_test_and_disable_2g5(struct bnx2 *bp)
1002{
1003 u32 up1;
1004 int ret = 0;
1005
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1007 return 0;
1008
27a005b8
MC
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1011
605a9e20
MC
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1016 ret = 1;
1017 }
1018
27a005b8
MC
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1022
605a9e20
MC
1023 return ret;
1024}
1025
1026static void
1027bnx2_enable_forced_2g5(struct bnx2 *bp)
1028{
1029 u32 bmcr;
1030
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1032 return;
1033
27a005b8
MC
1034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1035 u32 val;
1036
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1043
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1047
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1051 }
1052
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1057 }
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1059}
1060
1061static void
1062bnx2_disable_forced_2g5(struct bnx2 *bp)
1063{
1064 u32 bmcr;
1065
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1067 return;
1068
27a005b8
MC
1069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1070 u32 val;
1071
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1077
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1081
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1085 }
1086
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1090}
1091
b6016b76
MC
1092static int
1093bnx2_set_link(struct bnx2 *bp)
1094{
1095 u32 bmsr;
1096 u8 link_up;
1097
80be4434 1098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1099 bp->link_up = 1;
1100 return 0;
1101 }
1102
1103 link_up = bp->link_up;
1104
27a005b8
MC
1105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
b6016b76
MC
1109
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1112 u32 val;
1113
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1117 else
1118 bmsr &= ~BMSR_LSTATUS;
1119 }
1120
1121 if (bmsr & BMSR_LSTATUS) {
1122 bp->link_up = 1;
1123
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
27a005b8
MC
1129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
b6016b76
MC
1131 }
1132 else {
1133 bnx2_copper_linkup(bp);
1134 }
1135 bnx2_resolve_flow_ctrl(bp);
1136 }
1137 else {
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
605a9e20
MC
1139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
b6016b76 1141
b6016b76
MC
1142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1143 bp->link_up = 0;
1144 }
1145
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1148 }
1149
1150 bnx2_set_mac_link(bp);
1151
1152 return 0;
1153}
1154
1155static int
1156bnx2_reset_phy(struct bnx2 *bp)
1157{
1158 int i;
1159 u32 reg;
1160
ca58c3af 1161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1162
1163#define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1165 udelay(10);
1166
ca58c3af 1167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1168 if (!(reg & BMCR_RESET)) {
1169 udelay(20);
1170 break;
1171 }
1172 }
1173 if (i == PHY_RESET_MAX_WAIT) {
1174 return -EBUSY;
1175 }
1176 return 0;
1177}
1178
1179static u32
1180bnx2_phy_get_pause_adv(struct bnx2 *bp)
1181{
1182 u32 adv = 0;
1183
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1186
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1189 }
1190 else {
1191 adv = ADVERTISE_PAUSE_CAP;
1192 }
1193 }
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1197 }
1198 else {
1199 adv = ADVERTISE_PAUSE_ASYM;
1200 }
1201 }
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1208 }
1209 }
1210 return adv;
1211}
1212
1213static int
1214bnx2_setup_serdes_phy(struct bnx2 *bp)
1215{
605a9e20 1216 u32 adv, bmcr;
b6016b76
MC
1217 u32 new_adv = 0;
1218
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1220 u32 new_bmcr;
5b0c76ad
MC
1221 int force_link_down = 0;
1222
605a9e20
MC
1223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1229 }
ca58c3af 1230 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1232
ca58c3af 1233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1234 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1235 new_bmcr |= BMCR_SPEED1000;
605a9e20 1236
27a005b8
MC
1237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1243 }
1244
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1248 else
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1250 }
1251
b6016b76 1252 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1253 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1254 new_bmcr |= BMCR_FULLDPLX;
1255 }
1256 else {
5b0c76ad 1257 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1258 new_bmcr &= ~BMCR_FULLDPLX;
1259 }
5b0c76ad 1260 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1261 /* Force a link down visible on the other side */
1262 if (bp->link_up) {
ca58c3af 1263 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
ca58c3af 1266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1267 BMCR_ANRESTART | BMCR_ANENABLE);
1268
1269 bp->link_up = 0;
1270 netif_carrier_off(bp->dev);
ca58c3af 1271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1272 bnx2_report_link(bp);
b6016b76 1273 }
ca58c3af
MC
1274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1276 } else {
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
b6016b76
MC
1279 }
1280 return 0;
1281 }
1282
605a9e20 1283 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1284
b6016b76
MC
1285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1287
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1289
ca58c3af
MC
1290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1292
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1296 if (bp->link_up) {
ca58c3af 1297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1298 spin_unlock_bh(&bp->phy_lock);
1299 msleep(20);
1300 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1301 }
1302
ca58c3af
MC
1303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1305 BMCR_ANENABLE);
f8dd064e
MC
1306 /* Speed up link-up time when the link partner
1307 * does not autonegotiate which is very common
1308 * in blade servers. Some blade servers use
1309 * IPMI for kerboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1313 */
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1317 } else {
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
b6016b76
MC
1320 }
1321
1322 return 0;
1323}
1324
/* Advertisement masks used when validating ethtool speed settings. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1337
1338static int
1339bnx2_setup_copper_phy(struct bnx2 *bp)
1340{
1341 u32 bmcr;
1342 u32 new_bmcr;
1343
ca58c3af 1344 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1345
1346 if (bp->autoneg & AUTONEG_SPEED) {
1347 u32 adv_reg, adv1000_reg;
1348 u32 new_adv_reg = 0;
1349 u32 new_adv1000_reg = 0;
1350
ca58c3af 1351 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1352 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1353 ADVERTISE_PAUSE_ASYM);
1354
1355 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1356 adv1000_reg &= PHY_ALL_1000_SPEED;
1357
1358 if (bp->advertising & ADVERTISED_10baseT_Half)
1359 new_adv_reg |= ADVERTISE_10HALF;
1360 if (bp->advertising & ADVERTISED_10baseT_Full)
1361 new_adv_reg |= ADVERTISE_10FULL;
1362 if (bp->advertising & ADVERTISED_100baseT_Half)
1363 new_adv_reg |= ADVERTISE_100HALF;
1364 if (bp->advertising & ADVERTISED_100baseT_Full)
1365 new_adv_reg |= ADVERTISE_100FULL;
1366 if (bp->advertising & ADVERTISED_1000baseT_Full)
1367 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1368
b6016b76
MC
1369 new_adv_reg |= ADVERTISE_CSMA;
1370
1371 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1372
1373 if ((adv1000_reg != new_adv1000_reg) ||
1374 (adv_reg != new_adv_reg) ||
1375 ((bmcr & BMCR_ANENABLE) == 0)) {
1376
ca58c3af 1377 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1378 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1379 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1380 BMCR_ANENABLE);
1381 }
1382 else if (bp->link_up) {
1383 /* Flow ctrl may have changed from auto to forced */
1384 /* or vice-versa. */
1385
1386 bnx2_resolve_flow_ctrl(bp);
1387 bnx2_set_mac_link(bp);
1388 }
1389 return 0;
1390 }
1391
1392 new_bmcr = 0;
1393 if (bp->req_line_speed == SPEED_100) {
1394 new_bmcr |= BMCR_SPEED100;
1395 }
1396 if (bp->req_duplex == DUPLEX_FULL) {
1397 new_bmcr |= BMCR_FULLDPLX;
1398 }
1399 if (new_bmcr != bmcr) {
1400 u32 bmsr;
b6016b76 1401
ca58c3af
MC
1402 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1403 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1404
b6016b76
MC
1405 if (bmsr & BMSR_LSTATUS) {
1406 /* Force link down */
ca58c3af 1407 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1408 spin_unlock_bh(&bp->phy_lock);
1409 msleep(50);
1410 spin_lock_bh(&bp->phy_lock);
1411
ca58c3af
MC
1412 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1413 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1414 }
1415
ca58c3af 1416 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1417
1418 /* Normally, the new speed is setup after the link has
1419 * gone down and up again. In some cases, link will not go
1420 * down so we need to set up the new speed here.
1421 */
1422 if (bmsr & BMSR_LSTATUS) {
1423 bp->line_speed = bp->req_line_speed;
1424 bp->duplex = bp->req_duplex;
1425 bnx2_resolve_flow_ctrl(bp);
1426 bnx2_set_mac_link(bp);
1427 }
27a005b8
MC
1428 } else {
1429 bnx2_resolve_flow_ctrl(bp);
1430 bnx2_set_mac_link(bp);
b6016b76
MC
1431 }
1432 return 0;
1433}
1434
1435static int
1436bnx2_setup_phy(struct bnx2 *bp)
1437{
1438 if (bp->loopback == MAC_LOOPBACK)
1439 return 0;
1440
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1443 }
1444 else {
1445 return (bnx2_setup_copper_phy(bp));
1446 }
1447}
1448
27a005b8
MC
1449static int
1450bnx2_init_5709s_phy(struct bnx2 *bp)
1451{
1452 u32 val;
1453
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1460
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1463
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1465 bnx2_reset_phy(bp);
1466
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1468
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1473
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1478 else
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1481
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1486
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1488
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1492
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1494
1495 return 0;
1496}
1497
b6016b76 1498static int
5b0c76ad
MC
1499bnx2_init_5708s_phy(struct bnx2 *bp)
1500{
1501 u32 val;
1502
27a005b8
MC
1503 bnx2_reset_phy(bp);
1504
1505 bp->mii_up1 = BCM5708S_UP1;
1506
5b0c76ad
MC
1507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1510
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1514
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1518
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1523 }
1524
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1535 }
1536
e3648b3d 1537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1539
1540 if (val) {
1541 u32 is_backplane;
1542
e3648b3d 1543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1551 }
1552 }
1553 return 0;
1554}
1555
1556static int
1557bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1558{
27a005b8
MC
1559 bnx2_reset_phy(bp);
1560
b6016b76
MC
1561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1562
59b47d8a
MC
1563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1565
1566 if (bp->dev->mtu > 1500) {
1567 u32 val;
1568
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1573
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1577 }
1578 else {
1579 u32 val;
1580
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1584
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1588 }
1589
1590 return 0;
1591}
1592
1593static int
1594bnx2_init_copper_phy(struct bnx2 *bp)
1595{
5b0c76ad
MC
1596 u32 val;
1597
27a005b8
MC
1598 bnx2_reset_phy(bp);
1599
b6016b76
MC
1600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1609 }
1610
b659f44e
MC
1611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1615 val &= ~(1 << 8);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1617 }
1618
b6016b76 1619 if (bp->dev->mtu > 1500) {
b6016b76
MC
1620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1624
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1627 }
1628 else {
b6016b76
MC
1629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1632
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1635 }
1636
5b0c76ad
MC
1637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1641 return 0;
1642}
1643
1644
1645static int
1646bnx2_init_phy(struct bnx2 *bp)
1647{
1648 u32 val;
1649 int rc = 0;
1650
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1653
ca58c3af
MC
1654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
27a005b8 1656 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1659
b6016b76
MC
1660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1661
b6016b76
MC
1662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1666
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1674 }
1675 else {
1676 rc = bnx2_init_copper_phy(bp);
1677 }
1678
1679 bnx2_setup_phy(bp);
1680
1681 return rc;
1682}
1683
1684static int
1685bnx2_set_mac_loopback(struct bnx2 *bp)
1686{
1687 u32 mac_mode;
1688
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1693 bp->link_up = 1;
1694 return 0;
1695}
1696
bc5a0690
MC
1697static int bnx2_test_link(struct bnx2 *);
1698
1699static int
1700bnx2_set_phy_loopback(struct bnx2 *bp)
1701{
1702 u32 mac_mode;
1703 int rc, i;
1704
1705 spin_lock_bh(&bp->phy_lock);
ca58c3af 1706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
1707 BMCR_SPEED1000);
1708 spin_unlock_bh(&bp->phy_lock);
1709 if (rc)
1710 return rc;
1711
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1714 break;
80be4434 1715 msleep(100);
bc5a0690
MC
1716 }
1717
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1721 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
1722
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1725 bp->link_up = 1;
1726 return 0;
1727}
1728
b6016b76 1729static int
b090ae2b 1730bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
1731{
1732 int i;
1733 u32 val;
1734
b6016b76
MC
1735 bp->fw_wr_seq++;
1736 msg_data |= bp->fw_wr_seq;
1737
e3648b3d 1738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
1739
1740 /* wait for an acknowledgement. */
b090ae2b
MC
1741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1742 msleep(10);
b6016b76 1743
e3648b3d 1744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
1745
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1747 break;
1748 }
b090ae2b
MC
1749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1750 return 0;
b6016b76
MC
1751
1752 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
1753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1754 if (!silent)
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1756 "%x\n", msg_data);
b6016b76
MC
1757
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1760
e3648b3d 1761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 1762
b6016b76
MC
1763 return -EBUSY;
1764 }
1765
b090ae2b
MC
1766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1767 return -EIO;
1768
b6016b76
MC
1769 return 0;
1770}
1771
59b47d8a
MC
1772static int
1773bnx2_init_5709_context(struct bnx2 *bp)
1774{
1775 int i, ret = 0;
1776 u32 val;
1777
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < bp->ctx_pages; i++) {
1782 int j;
1783
1784 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788 (u64) bp->ctx_blk_mapping[i] >> 32);
1789 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791 for (j = 0; j < 10; j++) {
1792
1793 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1795 break;
1796 udelay(5);
1797 }
1798 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1799 ret = -EBUSY;
1800 break;
1801 }
1802 }
1803 return ret;
1804}
1805
b6016b76
MC
1806static void
1807bnx2_init_context(struct bnx2 *bp)
1808{
1809 u32 vcid;
1810
1811 vcid = 96;
1812 while (vcid) {
1813 u32 vcid_addr, pcid_addr, offset;
1814
1815 vcid--;
1816
1817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1818 u32 new_vcid;
1819
1820 vcid_addr = GET_PCID_ADDR(vcid);
1821 if (vcid & 0x8) {
1822 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1823 }
1824 else {
1825 new_vcid = vcid;
1826 }
1827 pcid_addr = GET_PCID_ADDR(new_vcid);
1828 }
1829 else {
1830 vcid_addr = GET_CID_ADDR(vcid);
1831 pcid_addr = vcid_addr;
1832 }
1833
1834 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1836
1837 /* Zero out the context. */
1838 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839 CTX_WR(bp, 0x00, offset, 0);
1840 }
1841
1842 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1844 }
1845}
1846
1847static int
1848bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1849{
1850 u16 *good_mbuf;
1851 u32 good_mbuf_cnt;
1852 u32 val;
1853
1854 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855 if (good_mbuf == NULL) {
1856 printk(KERN_ERR PFX "Failed to allocate memory in "
1857 "bnx2_alloc_bad_rbuf\n");
1858 return -ENOMEM;
1859 }
1860
1861 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1863
1864 good_mbuf_cnt = 0;
1865
1866 /* Allocate a bunch of mbufs and save the good ones in an array. */
1867 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1870
1871 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1872
1873 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1874
1875 /* The addresses with Bit 9 set are bad memory blocks. */
1876 if (!(val & (1 << 9))) {
1877 good_mbuf[good_mbuf_cnt] = (u16) val;
1878 good_mbuf_cnt++;
1879 }
1880
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1882 }
1883
1884 /* Free the good ones back to the mbuf pool thus discarding
1885 * all the bad ones. */
1886 while (good_mbuf_cnt) {
1887 good_mbuf_cnt--;
1888
1889 val = good_mbuf[good_mbuf_cnt];
1890 val = (val << 9) | val | 1;
1891
1892 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1893 }
1894 kfree(good_mbuf);
1895 return 0;
1896}
1897
1898static void
6aa20a22 1899bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
1900{
1901 u32 val;
1902 u8 *mac_addr = bp->dev->dev_addr;
1903
1904 val = (mac_addr[0] << 8) | mac_addr[1];
1905
1906 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1907
6aa20a22 1908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
1909 (mac_addr[4] << 8) | mac_addr[5];
1910
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1912}
1913
1914static inline int
1915bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1916{
1917 struct sk_buff *skb;
1918 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1919 dma_addr_t mapping;
13daffa2 1920 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
1921 unsigned long align;
1922
932f3772 1923 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
b6016b76
MC
1924 if (skb == NULL) {
1925 return -ENOMEM;
1926 }
1927
59b47d8a
MC
1928 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 1930
b6016b76
MC
1931 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932 PCI_DMA_FROMDEVICE);
1933
1934 rx_buf->skb = skb;
1935 pci_unmap_addr_set(rx_buf, mapping, mapping);
1936
1937 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1939
1940 bp->rx_prod_bseq += bp->rx_buf_use_size;
1941
1942 return 0;
1943}
1944
1945static void
1946bnx2_phy_int(struct bnx2 *bp)
1947{
1948 u32 new_link_state, old_link_state;
1949
1950 new_link_state = bp->status_blk->status_attn_bits &
1951 STATUS_ATTN_BITS_LINK_STATE;
1952 old_link_state = bp->status_blk->status_attn_bits_ack &
1953 STATUS_ATTN_BITS_LINK_STATE;
1954 if (new_link_state != old_link_state) {
1955 if (new_link_state) {
1956 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1957 STATUS_ATTN_BITS_LINK_STATE);
1958 }
1959 else {
1960 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1961 STATUS_ATTN_BITS_LINK_STATE);
1962 }
1963 bnx2_set_link(bp);
1964 }
1965}
1966
1967static void
1968bnx2_tx_int(struct bnx2 *bp)
1969{
f4e418f7 1970 struct status_block *sblk = bp->status_blk;
b6016b76
MC
1971 u16 hw_cons, sw_cons, sw_ring_cons;
1972 int tx_free_bd = 0;
1973
f4e418f7 1974 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
b6016b76
MC
1975 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1976 hw_cons++;
1977 }
1978 sw_cons = bp->tx_cons;
1979
1980 while (sw_cons != hw_cons) {
1981 struct sw_bd *tx_buf;
1982 struct sk_buff *skb;
1983 int i, last;
1984
1985 sw_ring_cons = TX_RING_IDX(sw_cons);
1986
1987 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1988 skb = tx_buf->skb;
1d39ed56 1989
b6016b76 1990 /* partial BD completions possible with TSO packets */
89114afd 1991 if (skb_is_gso(skb)) {
b6016b76
MC
1992 u16 last_idx, last_ring_idx;
1993
1994 last_idx = sw_cons +
1995 skb_shinfo(skb)->nr_frags + 1;
1996 last_ring_idx = sw_ring_cons +
1997 skb_shinfo(skb)->nr_frags + 1;
1998 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1999 last_idx++;
2000 }
2001 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2002 break;
2003 }
2004 }
1d39ed56 2005
b6016b76
MC
2006 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2007 skb_headlen(skb), PCI_DMA_TODEVICE);
2008
2009 tx_buf->skb = NULL;
2010 last = skb_shinfo(skb)->nr_frags;
2011
2012 for (i = 0; i < last; i++) {
2013 sw_cons = NEXT_TX_BD(sw_cons);
2014
2015 pci_unmap_page(bp->pdev,
2016 pci_unmap_addr(
2017 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2018 mapping),
2019 skb_shinfo(skb)->frags[i].size,
2020 PCI_DMA_TODEVICE);
2021 }
2022
2023 sw_cons = NEXT_TX_BD(sw_cons);
2024
2025 tx_free_bd += last + 1;
2026
745720e5 2027 dev_kfree_skb(skb);
b6016b76 2028
f4e418f7
MC
2029 hw_cons = bp->hw_tx_cons =
2030 sblk->status_tx_quick_consumer_index0;
2031
b6016b76
MC
2032 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2033 hw_cons++;
2034 }
2035 }
2036
e89bbf10 2037 bp->tx_cons = sw_cons;
2f8af120
MC
2038 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2039 * before checking for netif_queue_stopped(). Without the
2040 * memory barrier, there is a small possibility that bnx2_start_xmit()
2041 * will miss it and cause the queue to be stopped forever.
2042 */
2043 smp_mb();
b6016b76 2044
2f8af120
MC
2045 if (unlikely(netif_queue_stopped(bp->dev)) &&
2046 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2047 netif_tx_lock(bp->dev);
b6016b76 2048 if ((netif_queue_stopped(bp->dev)) &&
2f8af120 2049 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
b6016b76 2050 netif_wake_queue(bp->dev);
2f8af120 2051 netif_tx_unlock(bp->dev);
b6016b76 2052 }
b6016b76
MC
2053}
2054
2055static inline void
2056bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2057 u16 cons, u16 prod)
2058{
236b6394
MC
2059 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2060 struct rx_bd *cons_bd, *prod_bd;
2061
2062 cons_rx_buf = &bp->rx_buf_ring[cons];
2063 prod_rx_buf = &bp->rx_buf_ring[prod];
b6016b76
MC
2064
2065 pci_dma_sync_single_for_device(bp->pdev,
2066 pci_unmap_addr(cons_rx_buf, mapping),
2067 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2068
236b6394 2069 bp->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2070
236b6394 2071 prod_rx_buf->skb = skb;
b6016b76 2072
236b6394
MC
2073 if (cons == prod)
2074 return;
b6016b76 2075
236b6394
MC
2076 pci_unmap_addr_set(prod_rx_buf, mapping,
2077 pci_unmap_addr(cons_rx_buf, mapping));
2078
3fdfcc2c
MC
2079 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2080 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
2081 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2082 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
2083}
2084
2085static int
2086bnx2_rx_int(struct bnx2 *bp, int budget)
2087{
f4e418f7 2088 struct status_block *sblk = bp->status_blk;
b6016b76
MC
2089 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2090 struct l2_fhdr *rx_hdr;
2091 int rx_pkt = 0;
2092
f4e418f7 2093 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
b6016b76
MC
2094 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2095 hw_cons++;
2096 }
2097 sw_cons = bp->rx_cons;
2098 sw_prod = bp->rx_prod;
2099
2100 /* Memory barrier necessary as speculative reads of the rx
2101 * buffer can be ahead of the index in the status block
2102 */
2103 rmb();
2104 while (sw_cons != hw_cons) {
2105 unsigned int len;
ade2bfe7 2106 u32 status;
b6016b76
MC
2107 struct sw_bd *rx_buf;
2108 struct sk_buff *skb;
236b6394 2109 dma_addr_t dma_addr;
b6016b76
MC
2110
2111 sw_ring_cons = RX_RING_IDX(sw_cons);
2112 sw_ring_prod = RX_RING_IDX(sw_prod);
2113
2114 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2115 skb = rx_buf->skb;
236b6394
MC
2116
2117 rx_buf->skb = NULL;
2118
2119 dma_addr = pci_unmap_addr(rx_buf, mapping);
2120
2121 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
b6016b76
MC
2122 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2123
2124 rx_hdr = (struct l2_fhdr *) skb->data;
2125 len = rx_hdr->l2_fhdr_pkt_len - 4;
2126
ade2bfe7 2127 if ((status = rx_hdr->l2_fhdr_status) &
b6016b76
MC
2128 (L2_FHDR_ERRORS_BAD_CRC |
2129 L2_FHDR_ERRORS_PHY_DECODE |
2130 L2_FHDR_ERRORS_ALIGNMENT |
2131 L2_FHDR_ERRORS_TOO_SHORT |
2132 L2_FHDR_ERRORS_GIANT_FRAME)) {
2133
2134 goto reuse_rx;
2135 }
2136
2137 /* Since we don't have a jumbo ring, copy small packets
2138 * if mtu > 1500
2139 */
2140 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2141 struct sk_buff *new_skb;
2142
932f3772 2143 new_skb = netdev_alloc_skb(bp->dev, len + 2);
b6016b76
MC
2144 if (new_skb == NULL)
2145 goto reuse_rx;
2146
2147 /* aligned copy */
d626f62b
ACM
2148 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2149 new_skb->data, len + 2);
b6016b76
MC
2150 skb_reserve(new_skb, 2);
2151 skb_put(new_skb, len);
b6016b76
MC
2152
2153 bnx2_reuse_rx_skb(bp, skb,
2154 sw_ring_cons, sw_ring_prod);
2155
2156 skb = new_skb;
2157 }
2158 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
236b6394 2159 pci_unmap_single(bp->pdev, dma_addr,
b6016b76
MC
2160 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2161
2162 skb_reserve(skb, bp->rx_offset);
2163 skb_put(skb, len);
2164 }
2165 else {
2166reuse_rx:
2167 bnx2_reuse_rx_skb(bp, skb,
2168 sw_ring_cons, sw_ring_prod);
2169 goto next_rx;
2170 }
2171
2172 skb->protocol = eth_type_trans(skb, bp->dev);
2173
2174 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 2175 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 2176
745720e5 2177 dev_kfree_skb(skb);
b6016b76
MC
2178 goto next_rx;
2179
2180 }
2181
b6016b76
MC
2182 skb->ip_summed = CHECKSUM_NONE;
2183 if (bp->rx_csum &&
2184 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2185 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2186
ade2bfe7
MC
2187 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2188 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
2189 skb->ip_summed = CHECKSUM_UNNECESSARY;
2190 }
2191
2192#ifdef BCM_VLAN
2193 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2194 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2195 rx_hdr->l2_fhdr_vlan_tag);
2196 }
2197 else
2198#endif
2199 netif_receive_skb(skb);
2200
2201 bp->dev->last_rx = jiffies;
2202 rx_pkt++;
2203
2204next_rx:
b6016b76
MC
2205 sw_cons = NEXT_RX_BD(sw_cons);
2206 sw_prod = NEXT_RX_BD(sw_prod);
2207
2208 if ((rx_pkt == budget))
2209 break;
f4e418f7
MC
2210
2211 /* Refresh hw_cons to see if there is new work */
2212 if (sw_cons == hw_cons) {
2213 hw_cons = bp->hw_rx_cons =
2214 sblk->status_rx_quick_consumer_index0;
2215 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2216 hw_cons++;
2217 rmb();
2218 }
b6016b76
MC
2219 }
2220 bp->rx_cons = sw_cons;
2221 bp->rx_prod = sw_prod;
2222
2223 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2224
2225 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2226
2227 mmiowb();
2228
2229 return rx_pkt;
2230
2231}
2232
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache line holding the status block before the poll
	 * handler reads it.
	 */
	prefetch(bp->status_blk);
	/* Ack and mask further interrupts until NAPI polling completes;
	 * USE_INT_HC_PARAM keeps the host coalescing parameters in effect.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Defer the real work to the NAPI poll routine (bnx2_poll). */
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2255
/* INTx ISR.  Unlike the MSI handler, the line may be shared, so this
 * handler must first decide whether the interrupt belongs to us.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		/* Status unchanged and INTA deasserted: not our interrupt. */
		return IRQ_NONE;

	/* Ack and mask further interrupts until polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2285
f4e418f7
MC
2286static inline int
2287bnx2_has_work(struct bnx2 *bp)
2288{
2289 struct status_block *sblk = bp->status_blk;
2290
2291 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2292 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2293 return 1;
2294
db8b2255
MC
2295 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2296 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
f4e418f7
MC
2297 return 1;
2298
2299 return 0;
2300}
2301
/* NAPI poll routine.  Handles link attentions, TX completions and RX
 * work within the given budget, then re-enables interrupts if all work
 * is done.  Returns 1 while more work remains, 0 when polling is
 * complete.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Unacknowledged link-state attention: service the PHY. */
	if ((bp->status_blk->status_attn_bits &
	    STATUS_ATTN_BITS_LINK_STATE) !=
	    (bp->status_blk->status_attn_bits_ack &
	    STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never process more than the device quota allows. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index we have seen before checking for more
	 * work; the rmb() orders this read against the checks below.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single unmask/ack write suffices. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: ack while still masked, then unmask with a second
		 * write.  The two-step sequence is intentional.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2363
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC receive mode (promiscuous / all-multicast /
 * multicast hash filtering) and the RPM sort-user register to match
 * dev->flags and the current multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: open every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low CRC byte selects one of 256 hash bits:
			 * top 3 bits pick the register, low 5 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program sort-user0: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2438
fba9fe91
MC
2439#define FW_BUF_SIZE 0x8000
2440
2441static int
2442bnx2_gunzip_init(struct bnx2 *bp)
2443{
2444 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2445 goto gunzip_nomem1;
2446
2447 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2448 goto gunzip_nomem2;
2449
2450 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2451 if (bp->strm->workspace == NULL)
2452 goto gunzip_nomem3;
2453
2454 return 0;
2455
2456gunzip_nomem3:
2457 kfree(bp->strm);
2458 bp->strm = NULL;
2459
2460gunzip_nomem2:
2461 vfree(bp->gunzip_buf);
2462 bp->gunzip_buf = NULL;
2463
2464gunzip_nomem1:
2465 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2466 "uncompression.\n", bp->dev->name);
2467 return -ENOMEM;
2468}
2469
2470static void
2471bnx2_gunzip_end(struct bnx2 *bp)
2472{
2473 kfree(bp->strm->workspace);
2474
2475 kfree(bp->strm);
2476 bp->strm = NULL;
2477
2478 if (bp->gunzip_buf) {
2479 vfree(bp->gunzip_buf);
2480 bp->gunzip_buf = NULL;
2481 }
2482}
2483
2484static int
2485bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2486{
2487 int n, rc;
2488
2489 /* check gzip header */
2490 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2491 return -EINVAL;
2492
2493 n = 10;
2494
2495#define FNAME 0x8
2496 if (zbuf[3] & FNAME)
2497 while ((zbuf[n++] != 0) && (n < len));
2498
2499 bp->strm->next_in = zbuf + n;
2500 bp->strm->avail_in = len - n;
2501 bp->strm->next_out = bp->gunzip_buf;
2502 bp->strm->avail_out = FW_BUF_SIZE;
2503
2504 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2505 if (rc != Z_OK)
2506 return rc;
2507
2508 rc = zlib_inflate(bp->strm, Z_FINISH);
2509
2510 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2511 *outbuf = bp->gunzip_buf;
2512
2513 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2514 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2515 bp->dev->name, bp->strm->msg);
2516
2517 zlib_inflateEnd(bp->strm);
2518
2519 if (rc == Z_STREAM_END)
2520 return 0;
2521
2522 return rc;
2523}
2524
/* Load an RV2P processor firmware image.  Each 8-byte instruction is
 * written as a high/low register pair and committed to the selected
 * processor (RV2P_PROC1 or RV2P_PROC2) with an address/command write.
 * The processor is left in reset; it is un-stalled later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		/* The INSTR_HIGH write must precede the INSTR_LOW write
		 * for each instruction pair.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2557
af3ee519 2558static int
b6016b76
MC
2559load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2560{
2561 u32 offset;
2562 u32 val;
af3ee519 2563 int rc;
b6016b76
MC
2564
2565 /* Halt the CPU. */
2566 val = REG_RD_IND(bp, cpu_reg->mode);
2567 val |= cpu_reg->mode_value_halt;
2568 REG_WR_IND(bp, cpu_reg->mode, val);
2569 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2570
2571 /* Load the Text area. */
2572 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519
MC
2573 if (fw->gz_text) {
2574 u32 text_len;
2575 void *text;
2576
2577 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2578 &text_len);
2579 if (rc)
2580 return rc;
2581
2582 fw->text = text;
2583 }
2584 if (fw->gz_text) {
b6016b76
MC
2585 int j;
2586
2587 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
fba9fe91 2588 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2589 }
2590 }
2591
2592 /* Load the Data area. */
2593 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2594 if (fw->data) {
2595 int j;
2596
2597 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2598 REG_WR_IND(bp, offset, fw->data[j]);
2599 }
2600 }
2601
2602 /* Load the SBSS area. */
2603 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2604 if (fw->sbss) {
2605 int j;
2606
2607 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2608 REG_WR_IND(bp, offset, fw->sbss[j]);
2609 }
2610 }
2611
2612 /* Load the BSS area. */
2613 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2614 if (fw->bss) {
2615 int j;
2616
2617 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2618 REG_WR_IND(bp, offset, fw->bss[j]);
2619 }
2620 }
2621
2622 /* Load the Read-Only area. */
2623 offset = cpu_reg->spad_base +
2624 (fw->rodata_addr - cpu_reg->mips_view_base);
2625 if (fw->rodata) {
2626 int j;
2627
2628 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2629 REG_WR_IND(bp, offset, fw->rodata[j]);
2630 }
2631 }
2632
2633 /* Clear the pre-fetch instruction. */
2634 REG_WR_IND(bp, cpu_reg->inst, 0);
2635 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2636
2637 /* Start the CPU. */
2638 val = REG_RD_IND(bp, cpu_reg->mode);
2639 val &= ~cpu_reg->mode_value_halt;
2640 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2641 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2642
2643 return 0;
b6016b76
MC
2644}
2645
/* Load and start all on-chip processors: the two RV2P engines, then
 * the RXP, TXP, TPAT and COM CPUs, and on 5709 parts also the CP CPU.
 * Firmware selection between the _06 and _09 image sets depends on the
 * chip number.  Returns 0 or the first load error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2790
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; for D3hot, optionally programs Wake-on-LAN reception
 * (10/100 copper autoneg, magic/ACPI packet modes, all-multicast) and
 * notifies the bootcode before cutting power.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Undo the WoL receive configuration set up for D3hot. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WoL
			 * link, restoring the user settings afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending (unless WoL is
		 * entirely unsupported on this board).
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): on 5706 A0/A1 the D3hot state bits are
		 * only set when WoL is enabled — presumably a chip
		 * erratum workaround; confirm against errata docs.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2917
2918static int
2919bnx2_acquire_nvram_lock(struct bnx2 *bp)
2920{
2921 u32 val;
2922 int j;
2923
2924 /* Request access to the flash interface. */
2925 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2926 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2927 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2928 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2929 break;
2930
2931 udelay(5);
2932 }
2933
2934 if (j >= NVRAM_TIMEOUT_COUNT)
2935 return -EBUSY;
2936
2937 return 0;
2938}
2939
2940static int
2941bnx2_release_nvram_lock(struct bnx2 *bp)
2942{
2943 int j;
2944 u32 val;
2945
2946 /* Relinquish nvram interface. */
2947 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2948
2949 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2950 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2951 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2952 break;
2953
2954 udelay(5);
2955 }
2956
2957 if (j >= NVRAM_TIMEOUT_COUNT)
2958 return -EBUSY;
2959
2960 return 0;
2961}
2962
2963
2964static int
2965bnx2_enable_nvram_write(struct bnx2 *bp)
2966{
2967 u32 val;
2968
2969 val = REG_RD(bp, BNX2_MISC_CFG);
2970 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2971
2972 if (!bp->flash_info->buffered) {
2973 int j;
2974
2975 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2976 REG_WR(bp, BNX2_NVM_COMMAND,
2977 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2978
2979 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2980 udelay(5);
2981
2982 val = REG_RD(bp, BNX2_NVM_COMMAND);
2983 if (val & BNX2_NVM_COMMAND_DONE)
2984 break;
2985 }
2986
2987 if (j >= NVRAM_TIMEOUT_COUNT)
2988 return -EBUSY;
2989 }
2990 return 0;
2991}
2992
2993static void
2994bnx2_disable_nvram_write(struct bnx2 *bp)
2995{
2996 u32 val;
2997
2998 val = REG_RD(bp, BNX2_MISC_CFG);
2999 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3000}
3001
3002
3003static void
3004bnx2_enable_nvram_access(struct bnx2 *bp)
3005{
3006 u32 val;
3007
3008 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3009 /* Enable both bits, even on read. */
6aa20a22 3010 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3011 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3012}
3013
3014static void
3015bnx2_disable_nvram_access(struct bnx2 *bp)
3016{
3017 u32 val;
3018
3019 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3020 /* Disable both bits, even after read. */
6aa20a22 3021 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3022 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3023 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3024}
3025
3026static int
3027bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3028{
3029 u32 cmd;
3030 int j;
3031
3032 if (bp->flash_info->buffered)
3033 /* Buffered flash, no erase needed */
3034 return 0;
3035
3036 /* Build an erase command */
3037 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3038 BNX2_NVM_COMMAND_DOIT;
3039
3040 /* Need to clear DONE bit separately. */
3041 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3042
3043 /* Address of the NVRAM to read from. */
3044 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3045
3046 /* Issue an erase command. */
3047 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3048
3049 /* Wait for completion. */
3050 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3051 u32 val;
3052
3053 udelay(5);
3054
3055 val = REG_RD(bp, BNX2_NVM_COMMAND);
3056 if (val & BNX2_NVM_COMMAND_DONE)
3057 break;
3058 }
3059
3060 if (j >= NVRAM_TIMEOUT_COUNT)
3061 return -EBUSY;
3062
3063 return 0;
3064}
3065
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored in
 * big-endian byte order).  @cmd_flags carries FIRST/LAST framing bits.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts
	 * address by (page << page_bits) + offset-within-page.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Store big-endian so callers see raw flash
			 * byte order regardless of host endianness.
			 */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3111
3112
/* Write one 32-bit word (@val, raw flash byte order) to NVRAM at
 * @offset.  @cmd_flags carries FIRST/LAST framing bits.  Returns 0 on
 * success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Convert from raw flash byte order to the register format. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3156
/* Identify the attached flash/EEPROM part from the NVM_CFG1 strapping
 * and, if the interface has not been reconfigured yet, program the
 * flash configuration registers for the matched part.  Also determines
 * bp->flash_size.  Returns 0 on success, -ENODEV for an unknown part,
 * or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	/* NOTE(review): bit 30 of NVM_CFG1 appears to indicate the flash
	 * interface was already reconfigured (e.g. by the bootcode) —
	 * confirm against the chip documentation.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size advertised in shared memory; fall back
	 * to the table's total size when the field is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3234
/* Read @buf_size bytes from NVRAM starting at @offset into @ret_buf.
 * Handles unaligned start offsets and lengths by reading whole dwords
 * and copying out only the requested bytes, framing the transaction
 * with FIRST/LAST command flags.  Returns 0 on success or an error
 * from the lock/read helpers.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy the tail
	 * bytes the caller actually asked for.
	 */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round a ragged tail up to a whole dword; 'extra' bytes of the
	 * final dword will be discarded.
	 */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* cmd_flags nonzero means FIRST was already sent above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords need no framing flags. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3344
/* Write @buf_size bytes from @data_buf into NVRAM at byte offset @offset.
 *
 * Unaligned head/tail bytes are preserved by a read-modify-write through a
 * temporary dword-aligned shadow buffer.  For non-buffered flash parts each
 * affected page is read back in full, erased, and rewritten.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or an error propagated
 * from the NVRAM access helpers).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: back up to a dword boundary and pre-read the
	 * first dword so the untouched leading bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pre-read the last dword for the same reason. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the caller's data with the preserved head/tail bytes into a
	 * dword-aligned shadow buffer and write from that instead. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch copy of each page (264 bytes)
	 * so the unmodified parts of the page survive the erase cycle. */
	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				/* Last dword of the page closes the command
				 * sequence. */
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Close the sequence on the last dword of the page,
			 * or of the data range on buffered flash. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3524
/* Soft-reset the chip, coordinating with the bootcode firmware.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the firmware at
 * each synchronization point.  Returns 0 on success, -EBUSY if the reset
 * never completes, -ENODEV on an endian-configuration failure, or an error
 * from the firmware handshake.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; config space is then
		 * reprogrammed for register-window access. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* NOTE(review): early 5706 steppings need a ~20ms settle
		 * before the core can be polled; newer kernels use msleep()
		 * here instead of open-coded schedule_timeout(). */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3616
/* Program the freshly-reset chip: DMA configuration, context memory,
 * on-chip CPUs, MAC address, MTU, host-coalescing parameters and the
 * receive filter, then tell the firmware initialization is done.
 *
 * Returns 0 on success or an error from bnx2_init_cpus()/bnx2_fw_sync().
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-TDMA-read tweak for 133 MHz PCI-X.  */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* Early 5709 steppings need the MQ halt-disable workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Host addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds; each register packs the
	 * during-interrupt value in the high half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether ASF management firmware is active. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Final handshake: tell the firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache HC_COMMAND so later COAL_NOW pokes preserve its bits. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3784
59b47d8a
MC
3785static void
3786bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3787{
3788 u32 val, offset0, offset1, offset2, offset3;
3789
3790 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3791 offset0 = BNX2_L2CTX_TYPE_XI;
3792 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3793 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3794 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3795 } else {
3796 offset0 = BNX2_L2CTX_TYPE;
3797 offset1 = BNX2_L2CTX_CMD_TYPE;
3798 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3799 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3800 }
3801 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3802 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3803
3804 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3805 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3806
3807 val = (u64) bp->tx_desc_mapping >> 32;
3808 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3809
3810 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3811 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3812}
b6016b76
MC
3813
/* Reset the software TX ring state and hand the descriptor chain to the
 * hardware.  Must be called with the ring quiesced (after chip reset).
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The extra BD at the end of the ring chains back to the start,
	 * making the descriptor ring circular. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	/* Cache the mailbox addresses used by the hot TX path. */
	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3838
/* Reset the software RX ring state, link the per-page descriptor rings into
 * one circular chain, program the RX context, and pre-fill the ring with
 * receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page points at the next page; the final
		 * page points back at page 0, closing the circle. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Tell the hardware where the first ring page lives. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring; stop early on allocation failure and run with
	 * however many buffers were obtained. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3898
13daffa2
MC
3899static void
3900bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3901{
3902 u32 num_rings, max;
3903
3904 bp->rx_ring_size = size;
3905 num_rings = 1;
3906 while (size > MAX_RX_DESC_CNT) {
3907 size -= MAX_RX_DESC_CNT;
3908 num_rings++;
3909 }
3910 /* round to next power of 2 */
3911 max = MAX_RX_RINGS;
3912 while ((max & num_rings) == 0)
3913 max >>= 1;
3914
3915 if (num_rings != max)
3916 max <<= 1;
3917
3918 bp->rx_max_ring = max;
3919 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3920}
3921
b6016b76
MC
/* Unmap and free every skb still held in the TX ring.  Used when tearing
 * the ring down; assumes the hardware is no longer DMAing from it.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Slot i holds the linear (head) part of the packet ... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* ... and the following nr_frags slots hold its pages. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past head slot plus all fragment slots. */
		i += j + 1;
	}

}
3958
3959static void
3960bnx2_free_rx_skbs(struct bnx2 *bp)
3961{
3962 int i;
3963
3964 if (bp->rx_buf_ring == NULL)
3965 return;
3966
13daffa2 3967 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
3968 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3969 struct sk_buff *skb = rx_buf->skb;
3970
05d0f1cf 3971 if (skb == NULL)
b6016b76
MC
3972 continue;
3973
3974 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3975 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3976
3977 rx_buf->skb = NULL;
3978
745720e5 3979 dev_kfree_skb(skb);
b6016b76
MC
3980 }
3981}
3982
/* Release every skb the driver still owns, TX ring first, then RX. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3989
3990static int
3991bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3992{
3993 int rc;
3994
3995 rc = bnx2_reset_chip(bp, reset_code);
3996 bnx2_free_skbs(bp);
3997 if (rc)
3998 return rc;
3999
fba9fe91
MC
4000 if ((rc = bnx2_init_chip(bp)) != 0)
4001 return rc;
4002
b6016b76
MC
4003 bnx2_init_tx_ring(bp);
4004 bnx2_init_rx_ring(bp);
4005 return 0;
4006}
4007
4008static int
4009bnx2_init_nic(struct bnx2 *bp)
4010{
4011 int rc;
4012
4013 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4014 return rc;
4015
80be4434 4016 spin_lock_bh(&bp->phy_lock);
b6016b76 4017 bnx2_init_phy(bp);
80be4434 4018 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4019 bnx2_set_link(bp);
4020 return 0;
4021}
4022
/* Self-test: walk a table of registers, probing each one's read/write bits
 * (must accept 0 and all-ones through rw_mask) and read-only bits (must be
 * unaffected by writes).  The original register value is restored in all
 * cases.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table entries: { offset, flags, writable-bit mask, read-only-bit
	 * mask }.  Entries flagged BNX2_FL_NOT_5709 are skipped on 5709. */
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: every writable bit must read back 0, read-only
		 * bits must be unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: every writable bit must read back 1,
		 * read-only bits again unchanged. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4193
4194static int
4195bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4196{
f71e1309 4197 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4198 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4199 int i;
4200
4201 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4202 u32 offset;
4203
4204 for (offset = 0; offset < size; offset += 4) {
4205
4206 REG_WR_IND(bp, start + offset, test_pattern[i]);
4207
4208 if (REG_RD_IND(bp, start + offset) !=
4209 test_pattern[i]) {
4210 return -ENODEV;
4211 }
4212 }
4213 }
4214 return 0;
4215}
4216
/* Self-test the chip's internal memories.  The 5709 table omits the
 * 0x160000 region present on the 5706/5708.  Returns 0 on success or the
 * first error from bnx2_do_mem_test().
 */
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	/* Tables of { start offset, length } pairs, 0xffffffff-terminated. */
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_5706[] = {
		{ 0x60000, 0x4000 },
		{ 0xa0000, 0x3000 },
		{ 0xe0000, 0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0 },
	},
	mem_tbl_5709[] = {
		{ 0x60000, 0x4000 },
		{ 0xa0000, 0x3000 },
		{ 0xe0000, 0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0 },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
4258
bc5a0690
MC
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Run one loopback self-test iteration in the given mode (MAC- or
 * PHY-level loopback).  A 1514-byte test frame is transmitted and the
 * received copy is validated byte for byte.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM on allocation failure, and
 * -ENODEV when the frame does not come back intact.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: own MAC as destination, zero type/len
	 * area, then an incrementing byte pattern. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status-block update so rx_start_idx is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the hardware via a single TX BD. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status-block update to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX completion must have consumed exactly our one BD. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the hardware flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: the hardware length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Payload must match the pattern that was sent. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4380
bc5a0690
MC
4381#define BNX2_MAC_LOOPBACK_FAILED 1
4382#define BNX2_PHY_LOOPBACK_FAILED 2
4383#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4384 BNX2_PHY_LOOPBACK_FAILED)
4385
/* Ethtool self-test helper: run both loopback self-tests.
 * Resets the NIC, re-initializes the PHY under phy_lock, then runs the
 * MAC-level and PHY-level loopbacks via bnx2_run_loopback().
 * Returns a bitmask of BNX2_MAC_LOOPBACK_FAILED / BNX2_PHY_LOOPBACK_FAILED
 * (0 on success), or BNX2_LOOPBACK_FAILED if the device is not up.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
4404
b6016b76
MC
4405#define NVRAM_SIZE 0x200
4406#define CRC32_RESIDUAL 0xdebb20e3
4407
4408static int
4409bnx2_test_nvram(struct bnx2 *bp)
4410{
4411 u32 buf[NVRAM_SIZE / 4];
4412 u8 *data = (u8 *) buf;
4413 int rc = 0;
4414 u32 magic, csum;
4415
4416 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4417 goto test_nvram_done;
4418
4419 magic = be32_to_cpu(buf[0]);
4420 if (magic != 0x669955aa) {
4421 rc = -ENODEV;
4422 goto test_nvram_done;
4423 }
4424
4425 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4426 goto test_nvram_done;
4427
4428 csum = ether_crc_le(0x100, data);
4429 if (csum != CRC32_RESIDUAL) {
4430 rc = -ENODEV;
4431 goto test_nvram_done;
4432 }
4433
4434 csum = ether_crc_le(0x100, data + 0x100);
4435 if (csum != CRC32_RESIDUAL) {
4436 rc = -ENODEV;
4437 }
4438
4439test_nvram_done:
4440 return rc;
4441}
4442
4443static int
4444bnx2_test_link(struct bnx2 *bp)
4445{
4446 u32 bmsr;
4447
c770a65c 4448 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
4449 bnx2_enable_bmsr1(bp);
4450 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4451 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4452 bnx2_disable_bmsr1(bp);
c770a65c 4453 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4454
b6016b76
MC
4455 if (bmsr & BMSR_LSTATUS) {
4456 return 0;
4457 }
4458 return -ENODEV;
4459}
4460
/* Ethtool/MSI self-test helper: verify the device can deliver an interrupt.
 * Forces a host-coalescing event and polls the PCICFG interrupt-ack
 * register for up to ~100ms waiting for the status index to advance.
 * Returns 0 if an interrupt event was observed, -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* Snapshot the current status index so we can detect a change. */
	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	/* Read back before polling (presumably to flush the posted write). */
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
4490
/* Periodic SerDes link maintenance for the 5706, called from bnx2_timer().
 * Implements parallel detection: if autoneg has not brought the link up,
 * probe vendor PHY registers for a signal without autoneg configuration
 * and, if found, force 1000/full.  Once a link partner later signals
 * CONFIG, autoneg is re-enabled.  The 0x1c/0x17/0x15 accesses are
 * vendor-specific shadow/expansion register reads; the bit meanings
 * are only as noted in the inline comments below.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* An autoneg restart is still settling; just count down. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			/* Register 0x15 is read twice via the 0x17 window. */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal but no autoneg partner: force
				 * 1000/full and mark parallel detect.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner now signals CONFIG: go back to autoneg. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
b6016b76 4545
f8dd064e
MC
4546static void
4547bnx2_5708_serdes_timer(struct bnx2 *bp)
4548{
4549 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4550 bp->serdes_an_pending = 0;
4551 return;
4552 }
b6016b76 4553
f8dd064e
MC
4554 spin_lock(&bp->phy_lock);
4555 if (bp->serdes_an_pending)
4556 bp->serdes_an_pending--;
4557 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4558 u32 bmcr;
b6016b76 4559
ca58c3af 4560 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 4561 if (bmcr & BMCR_ANENABLE) {
605a9e20 4562 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
4563 bp->current_interval = SERDES_FORCED_TIMEOUT;
4564 } else {
605a9e20 4565 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
4566 bp->serdes_an_pending = 2;
4567 bp->current_interval = bp->timer_interval;
b6016b76 4568 }
b6016b76 4569
f8dd064e
MC
4570 } else
4571 bp->current_interval = bp->timer_interval;
b6016b76 4572
f8dd064e
MC
4573 spin_unlock(&bp->phy_lock);
4574}
4575
48b01e2d
MC
/* Driver heartbeat timer.  Sends the periodic driver-pulse sequence to
 * the bootcode via shared memory, snapshots the firmware RX drop count
 * into the stats block, runs the chip-specific SerDes state machine,
 * and re-arms itself at bp->current_interval.  Skips the work (but
 * still re-arms) while interrupts are disabled via intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat to the bootcode so it knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4603
/* Called with rtnl_lock */
/* net_device open().  Powers the chip to D0, allocates rings, requests
 * the IRQ (preferring MSI on chips other than 5706 A0/A1 unless the
 * disable_msi module parameter is set), initializes the NIC, and then
 * verifies MSI actually delivers an interrupt — falling back to INTx
 * and re-initializing if it does not.  Every failure path unwinds the
 * resources acquired so far.  Returns 0 or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* 5706 A0/A1 don't use MSI (errata, per the CHIP_ID exclusion). */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Re-init the NIC from scratch before re-requesting
			 * the IRQ in shared INTx mode.
			 */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4701
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop the netif,
 * re-initialize the NIC, and restart.  The in_reset_task flag is
 * polled by bnx2_close() so the close path can wait for this work
 * to finish instead of calling flush_scheduled_work() (which could
 * deadlock on rtnl_lock — see the comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold off interrupt handling until bnx2_netif_start re-enables. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4719
/* net_device tx_timeout hook: defer the actual chip reset to process
 * context via the reset_task workqueue item (see bnx2_reset_task()).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
4728
4729#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * RX mode with traffic stopped, then restart the netif.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4743
/* Called with rtnl_lock */
/* VLAN acceleration hook: drop one VID from the group and reprogram
 * the RX mode with traffic stopped.
 * NOTE(review): assumes bp->vlgrp is non-NULL here (i.e. register was
 * called first) — confirm against the VLAN layer's call ordering.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4756#endif
4757
932ff279 4758/* Called with netif_tx_lock.
2f8af120
MC
4759 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4760 * netif_wake_queue().
b6016b76
MC
4761 */
4762static int
4763bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4764{
972ec0d4 4765 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4766 dma_addr_t mapping;
4767 struct tx_bd *txbd;
4768 struct sw_bd *tx_buf;
4769 u32 len, vlan_tag_flags, last_frag, mss;
4770 u16 prod, ring_prod;
4771 int i;
4772
e89bbf10 4773 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
4774 netif_stop_queue(dev);
4775 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4776 dev->name);
4777
4778 return NETDEV_TX_BUSY;
4779 }
4780 len = skb_headlen(skb);
4781 prod = bp->tx_prod;
4782 ring_prod = TX_RING_IDX(prod);
4783
4784 vlan_tag_flags = 0;
84fa7933 4785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
4786 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4787 }
4788
4789 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4790 vlan_tag_flags |=
4791 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4792 }
7967168c 4793 if ((mss = skb_shinfo(skb)->gso_size) &&
b6016b76
MC
4794 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4795 u32 tcp_opt_len, ip_tcp_len;
eddc9ec5 4796 struct iphdr *iph;
b6016b76 4797
b6016b76
MC
4798 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4799
4666f87a
MC
4800 tcp_opt_len = tcp_optlen(skb);
4801
4802 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4803 u32 tcp_off = skb_transport_offset(skb) -
4804 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 4805
4666f87a
MC
4806 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4807 TX_BD_FLAGS_SW_FLAGS;
4808 if (likely(tcp_off == 0))
4809 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4810 else {
4811 tcp_off >>= 3;
4812 vlan_tag_flags |= ((tcp_off & 0x3) <<
4813 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4814 ((tcp_off & 0x10) <<
4815 TX_BD_FLAGS_TCP6_OFF4_SHL);
4816 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4817 }
4818 } else {
4819 if (skb_header_cloned(skb) &&
4820 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4821 dev_kfree_skb(skb);
4822 return NETDEV_TX_OK;
4823 }
b6016b76 4824
4666f87a
MC
4825 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4826
4827 iph = ip_hdr(skb);
4828 iph->check = 0;
4829 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4830 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4831 iph->daddr, 0,
4832 IPPROTO_TCP,
4833 0);
4834 if (tcp_opt_len || (iph->ihl > 5)) {
4835 vlan_tag_flags |= ((iph->ihl - 5) +
4836 (tcp_opt_len >> 2)) << 8;
4837 }
b6016b76 4838 }
4666f87a 4839 } else
b6016b76 4840 mss = 0;
b6016b76
MC
4841
4842 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 4843
b6016b76
MC
4844 tx_buf = &bp->tx_buf_ring[ring_prod];
4845 tx_buf->skb = skb;
4846 pci_unmap_addr_set(tx_buf, mapping, mapping);
4847
4848 txbd = &bp->tx_desc_ring[ring_prod];
4849
4850 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4851 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4852 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4853 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4854
4855 last_frag = skb_shinfo(skb)->nr_frags;
4856
4857 for (i = 0; i < last_frag; i++) {
4858 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4859
4860 prod = NEXT_TX_BD(prod);
4861 ring_prod = TX_RING_IDX(prod);
4862 txbd = &bp->tx_desc_ring[ring_prod];
4863
4864 len = frag->size;
4865 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4866 len, PCI_DMA_TODEVICE);
4867 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4868 mapping, mapping);
4869
4870 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4871 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4872 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4873 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4874
4875 }
4876 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4877
4878 prod = NEXT_TX_BD(prod);
4879 bp->tx_prod_bseq += skb->len;
4880
234754d5
MC
4881 REG_WR16(bp, bp->tx_bidx_addr, prod);
4882 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4883
4884 mmiowb();
4885
4886 bp->tx_prod = prod;
4887 dev->trans_start = jiffies;
4888
e89bbf10 4889 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 4890 netif_stop_queue(dev);
2f8af120 4891 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 4892 netif_wake_queue(dev);
b6016b76
MC
4893 }
4894
4895 return NETDEV_TX_OK;
4896}
4897
/* Called with rtnl_lock */
/* net_device stop().  Waits for any in-flight reset_task, quiesces the
 * netif and timer, shuts the chip down with a WoL-appropriate reset
 * code, releases the IRQ/MSI, frees rings, and drops to D3hot.
 * Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Tell the bootcode how we are going down (affects WoL setup). */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4933
/* Combine the _hi/_lo halves of a 64-bit hardware counter.  On 64-bit
 * hosts the full value is reconstructed; on 32-bit hosts only the low
 * word is reported.  Both expansions are fully parenthesized so the
 * macros are safe inside any surrounding expression (the old
 * GET_NET_STATS64 left its top-level '+' exposed).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4946
/* net_device get_stats hook: translate the DMA'd hardware statistics
 * block into the generic net_device_stats counters.  If the stats
 * block has not been allocated yet, the (zeroed) net_stats is
 * returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include drops done by the firmware (sampled in bnx2_timer()). */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5022
5023/* All ethtool functions called with rtnl_lock */
5024
5025static int
5026bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5027{
972ec0d4 5028 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5029
5030 cmd->supported = SUPPORTED_Autoneg;
5031 if (bp->phy_flags & PHY_SERDES_FLAG) {
5032 cmd->supported |= SUPPORTED_1000baseT_Full |
5033 SUPPORTED_FIBRE;
605a9e20
MC
5034 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5035 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76
MC
5036
5037 cmd->port = PORT_FIBRE;
5038 }
5039 else {
5040 cmd->supported |= SUPPORTED_10baseT_Half |
5041 SUPPORTED_10baseT_Full |
5042 SUPPORTED_100baseT_Half |
5043 SUPPORTED_100baseT_Full |
5044 SUPPORTED_1000baseT_Full |
5045 SUPPORTED_TP;
5046
5047 cmd->port = PORT_TP;
5048 }
5049
5050 cmd->advertising = bp->advertising;
5051
5052 if (bp->autoneg & AUTONEG_SPEED) {
5053 cmd->autoneg = AUTONEG_ENABLE;
5054 }
5055 else {
5056 cmd->autoneg = AUTONEG_DISABLE;
5057 }
5058
5059 if (netif_carrier_ok(dev)) {
5060 cmd->speed = bp->line_speed;
5061 cmd->duplex = bp->duplex;
5062 }
5063 else {
5064 cmd->speed = -1;
5065 cmd->duplex = -1;
5066 }
5067
5068 cmd->transceiver = XCVR_INTERNAL;
5069 cmd->phy_address = bp->phy_addr;
5070
5071 return 0;
5072}
6aa20a22 5073
b6016b76
MC
/* Ethtool set_settings: validate the requested autoneg/speed/duplex
 * combination against the PHY type (copper vs SerDes, 2.5G capability),
 * commit it to bp, and reprogram the PHY under phy_lock.
 * Returns 0 or -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on copies so nothing is committed until validation passes. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertisement is copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes supports only forced 1000/full or
			 * (if capable) 2500/full.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Copper 1000 cannot be forced; autoneg required. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5151
5152static void
5153bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5154{
972ec0d4 5155 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5156
5157 strcpy(info->driver, DRV_MODULE_NAME);
5158 strcpy(info->version, DRV_MODULE_VERSION);
5159 strcpy(info->bus_info, pci_name(bp->pdev));
5160 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5161 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5162 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
206cc83c
MC
5163 info->fw_version[1] = info->fw_version[3] = '.';
5164 info->fw_version[5] = 0;
b6016b76
MC
5165}
5166
244ac4f4
MC
5167#define BNX2_REGDUMP_LEN (32 * 1024)
5168
5169static int
5170bnx2_get_regs_len(struct net_device *dev)
5171{
5172 return BNX2_REGDUMP_LEN;
5173}
5174
5175static void
5176bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5177{
5178 u32 *p = _p, i, offset;
5179 u8 *orig_p = _p;
5180 struct bnx2 *bp = netdev_priv(dev);
5181 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5182 0x0800, 0x0880, 0x0c00, 0x0c10,
5183 0x0c30, 0x0d08, 0x1000, 0x101c,
5184 0x1040, 0x1048, 0x1080, 0x10a4,
5185 0x1400, 0x1490, 0x1498, 0x14f0,
5186 0x1500, 0x155c, 0x1580, 0x15dc,
5187 0x1600, 0x1658, 0x1680, 0x16d8,
5188 0x1800, 0x1820, 0x1840, 0x1854,
5189 0x1880, 0x1894, 0x1900, 0x1984,
5190 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5191 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5192 0x2000, 0x2030, 0x23c0, 0x2400,
5193 0x2800, 0x2820, 0x2830, 0x2850,
5194 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5195 0x3c00, 0x3c94, 0x4000, 0x4010,
5196 0x4080, 0x4090, 0x43c0, 0x4458,
5197 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5198 0x4fc0, 0x5010, 0x53c0, 0x5444,
5199 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5200 0x5fc0, 0x6000, 0x6400, 0x6428,
5201 0x6800, 0x6848, 0x684c, 0x6860,
5202 0x6888, 0x6910, 0x8000 };
5203
5204 regs->version = 0;
5205
5206 memset(p, 0, BNX2_REGDUMP_LEN);
5207
5208 if (!netif_running(bp->dev))
5209 return;
5210
5211 i = 0;
5212 offset = reg_boundaries[0];
5213 p += offset;
5214 while (offset < BNX2_REGDUMP_LEN) {
5215 *p++ = REG_RD(bp, offset);
5216 offset += 4;
5217 if (offset == reg_boundaries[i + 1]) {
5218 offset = reg_boundaries[i + 2];
5219 p = (u32 *) (orig_p + offset);
5220 i += 2;
5221 }
5222 }
5223}
5224
b6016b76
MC
5225static void
5226bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5227{
972ec0d4 5228 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5229
5230 if (bp->flags & NO_WOL_FLAG) {
5231 wol->supported = 0;
5232 wol->wolopts = 0;
5233 }
5234 else {
5235 wol->supported = WAKE_MAGIC;
5236 if (bp->wol)
5237 wol->wolopts = WAKE_MAGIC;
5238 else
5239 wol->wolopts = 0;
5240 }
5241 memset(&wol->sopass, 0, sizeof(wol->sopass));
5242}
5243
5244static int
5245bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5246{
972ec0d4 5247 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5248
5249 if (wol->wolopts & ~WAKE_MAGIC)
5250 return -EINVAL;
5251
5252 if (wol->wolopts & WAKE_MAGIC) {
5253 if (bp->flags & NO_WOL_FLAG)
5254 return -EINVAL;
5255
5256 bp->wol = 1;
5257 }
5258 else {
5259 bp->wol = 0;
5260 }
5261 return 0;
5262}
5263
/* Ethtool nway_reset: restart autonegotiation.  On SerDes PHYs the
 * link is first forced down (loopback) with the lock dropped around a
 * 20ms settle, and the serdes timer state machine is re-armed, before
 * BMCR is rewritten with ANRESTART|ANENABLE.
 * Returns 0, or -EINVAL if autoneg is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping (spinlock, BH context). */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Re-arm the serdes timer's autoneg-pending window. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5298
5299static int
5300bnx2_get_eeprom_len(struct net_device *dev)
5301{
972ec0d4 5302 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5303
1122db71 5304 if (bp->flash_info == NULL)
b6016b76
MC
5305 return 0;
5306
1122db71 5307 return (int) bp->flash_size;
b6016b76
MC
5308}
5309
5310static int
5311bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5312 u8 *eebuf)
5313{
972ec0d4 5314 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5315 int rc;
5316
1064e944 5317 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5318
5319 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5320
5321 return rc;
5322}
5323
5324static int
5325bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5326 u8 *eebuf)
5327{
972ec0d4 5328 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5329 int rc;
5330
1064e944 5331 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5332
5333 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5334
5335 return rc;
5336}
5337
5338static int
5339bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5340{
972ec0d4 5341 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5342
5343 memset(coal, 0, sizeof(struct ethtool_coalesce));
5344
5345 coal->rx_coalesce_usecs = bp->rx_ticks;
5346 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5347 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5348 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5349
5350 coal->tx_coalesce_usecs = bp->tx_ticks;
5351 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5352 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5353 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5354
5355 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5356
5357 return 0;
5358}
5359
/* Ethtool set_coalesce: clamp each requested value to its hardware
 * field width (tick counters to 10 bits, frame-trip counters to 8
 * bits, stats interval to a 0xffff00-aligned 24-bit value) and, if
 * the interface is up, re-init the NIC so they take effect.
 * Always returns 0.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Stats interval: cap at 0xffff00 and drop the low 8 bits. */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5403
5404static void
5405bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5406{
972ec0d4 5407 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5408
13daffa2 5409 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5410 ering->rx_mini_max_pending = 0;
5411 ering->rx_jumbo_max_pending = 0;
5412
5413 ering->rx_pending = bp->rx_ring_size;
5414 ering->rx_mini_pending = 0;
5415 ering->rx_jumbo_pending = 0;
5416
5417 ering->tx_max_pending = MAX_TX_DESC_CNT;
5418 ering->tx_pending = bp->tx_ring_size;
5419}
5420
/* Ethtool set_ringparam: validate the requested sizes, then (if the
 * interface is up) tear down the rings, apply the new sizes, and
 * rebuild.  Returns 0, -EINVAL on bad sizes, or an allocation error.
 * NOTE(review): if bnx2_alloc_mem() fails here the interface is left
 * stopped with its rings freed — confirm whether callers tolerate
 * that or whether the device should be closed on this path.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX must exceed MAX_SKB_FRAGS so one worst-case skb always fits. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5454
5455static void
5456bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5457{
972ec0d4 5458 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5459
5460 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5461 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5462 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5463}
5464
5465static int
5466bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5467{
972ec0d4 5468 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5469
5470 bp->req_flow_ctrl = 0;
5471 if (epause->rx_pause)
5472 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5473 if (epause->tx_pause)
5474 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5475
5476 if (epause->autoneg) {
5477 bp->autoneg |= AUTONEG_FLOW_CTRL;
5478 }
5479 else {
5480 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5481 }
5482
c770a65c 5483 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
5484
5485 bnx2_setup_phy(bp);
5486
c770a65c 5487 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5488
5489 return 0;
5490}
5491
5492static u32
5493bnx2_get_rx_csum(struct net_device *dev)
5494{
972ec0d4 5495 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5496
5497 return bp->rx_csum;
5498}
5499
5500static int
5501bnx2_set_rx_csum(struct net_device *dev, u32 data)
5502{
972ec0d4 5503 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5504
5505 bp->rx_csum = data;
5506 return 0;
5507}
5508
b11d6213
MC
5509static int
5510bnx2_set_tso(struct net_device *dev, u32 data)
5511{
4666f87a
MC
5512 struct bnx2 *bp = netdev_priv(dev);
5513
5514 if (data) {
b11d6213 5515 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
5516 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5517 dev->features |= NETIF_F_TSO6;
5518 } else
5519 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5520 NETIF_F_TSO_ECN);
b11d6213
MC
5521 return 0;
5522}
5523
/* Number of ethtool statistics counters exported by this driver. */
#define BNX2_NUM_STATS 46

/* Names reported via ethtool -S; order must match bnx2_stats_offset_arr
 * and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5576
/* Byte offset of a statistics_block field expressed in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets of each counter inside the hardware statistics block;
 * indexed in lockstep with bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
5627
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skip) for 5706-class chips;
 * indexed in lockstep with bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5638
5b0c76ad
MC
/* Per-counter width in bytes for 5708 and later chips; no counters are
 * skipped here (no errata workarounds needed).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5646
b6016b76
MC
/* Number of self-test results reported via ethtool. */
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self tests, in the order bnx2_self_test fills
 * its result buffer.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5659
/* ethtool hook: number of entries bnx2_self_test reports. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5665
/* ethtool self-test hook.  Offline tests (registers, memory, loopback)
 * stop traffic and put the chip in diagnostic mode; the remaining tests
 * (nvram, interrupt, link) run with the device online.  buf[i] is set
 * non-zero when test i fails, matching bnx2_tests_str_arr order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and reset into diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback test returns a non-zero failure code directly. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or leave reset if the
		 * interface is down).
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		/* NOTE(review): this waits up to 7s even when the
		 * interface is down and the link cannot come up —
		 * presumably harmless but slow; confirm against later
		 * upstream restructuring.
		 */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5721
5722static void
5723bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5724{
5725 switch (stringset) {
5726 case ETH_SS_STATS:
5727 memcpy(buf, bnx2_stats_str_arr,
5728 sizeof(bnx2_stats_str_arr));
5729 break;
5730 case ETH_SS_TEST:
5731 memcpy(buf, bnx2_tests_str_arr,
5732 sizeof(bnx2_tests_str_arr));
5733 break;
5734 }
5735}
5736
/* ethtool hook: number of counters bnx2_get_ethtool_stats reports. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5742
5743static void
5744bnx2_get_ethtool_stats(struct net_device *dev,
5745 struct ethtool_stats *stats, u64 *buf)
5746{
972ec0d4 5747 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5748 int i;
5749 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 5750 u8 *stats_len_arr = NULL;
b6016b76
MC
5751
5752 if (hw_stats == NULL) {
5753 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5754 return;
5755 }
5756
5b0c76ad
MC
5757 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5758 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5759 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5760 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 5761 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
5762 else
5763 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
5764
5765 for (i = 0; i < BNX2_NUM_STATS; i++) {
5766 if (stats_len_arr[i] == 0) {
5767 /* skip this counter */
5768 buf[i] = 0;
5769 continue;
5770 }
5771 if (stats_len_arr[i] == 4) {
5772 /* 4-byte counter */
5773 buf[i] = (u64)
5774 *(hw_stats + bnx2_stats_offset_arr[i]);
5775 continue;
5776 }
5777 /* 8-byte counter */
5778 buf[i] = (((u64) *(hw_stats +
5779 bnx2_stats_offset_arr[i])) << 32) +
5780 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5781 }
5782}
5783
5784static int
5785bnx2_phys_id(struct net_device *dev, u32 data)
5786{
972ec0d4 5787 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5788 int i;
5789 u32 save;
5790
5791 if (data == 0)
5792 data = 2;
5793
5794 save = REG_RD(bp, BNX2_MISC_CFG);
5795 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5796
5797 for (i = 0; i < (data * 2); i++) {
5798 if ((i % 2) == 0) {
5799 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5800 }
5801 else {
5802 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5803 BNX2_EMAC_LED_1000MB_OVERRIDE |
5804 BNX2_EMAC_LED_100MB_OVERRIDE |
5805 BNX2_EMAC_LED_10MB_OVERRIDE |
5806 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5807 BNX2_EMAC_LED_TRAFFIC);
5808 }
5809 msleep_interruptible(500);
5810 if (signal_pending(current))
5811 break;
5812 }
5813 REG_WR(bp, BNX2_EMAC_LED, 0);
5814 REG_WR(bp, BNX2_MISC_CFG, save);
5815 return 0;
5816}
5817
4666f87a
MC
5818static int
5819bnx2_set_tx_csum(struct net_device *dev, u32 data)
5820{
5821 struct bnx2 *bp = netdev_priv(dev);
5822
5823 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5824 return (ethtool_op_set_tx_hw_csum(dev, data));
5825 else
5826 return (ethtool_op_set_tx_csum(dev, data));
5827}
5828
/* ethtool method table for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5864
/* Called with rtnl_lock */
/* MII ioctl handler: read/write PHY registers on behalf of user space.
 * PHY access requires a running interface and is serialized by phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writes are privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5912
5913/* Called with rtnl_lock */
5914static int
5915bnx2_change_mac_addr(struct net_device *dev, void *p)
5916{
5917 struct sockaddr *addr = p;
972ec0d4 5918 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5919
73eef4cd
MC
5920 if (!is_valid_ether_addr(addr->sa_data))
5921 return -EINVAL;
5922
b6016b76
MC
5923 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5924 if (netif_running(dev))
5925 bnx2_set_mac_addr(bp);
5926
5927 return 0;
5928}
5929
5930/* Called with rtnl_lock */
5931static int
5932bnx2_change_mtu(struct net_device *dev, int new_mtu)
5933{
972ec0d4 5934 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5935
5936 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5937 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5938 return -EINVAL;
5939
5940 dev->mtu = new_mtu;
5941 if (netif_running(dev)) {
5942 bnx2_netif_stop(bp);
5943
5944 bnx2_init_nic(bp);
5945
5946 bnx2_netif_start(bp);
5947 }
5948 return 0;
5949}
5950
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the device with its IRQ line masked so the
 * handler can be invoked synchronously (e.g. for netconsole).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5962
253c8b75
MC
/* Determine whether a 5709 port is copper or SerDes from the bond id
 * and strap settings in the dual media control register, setting
 * PHY_SERDES_FLAG accordingly.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C" means copper, "S" means SerDes; anything else
	 * falls through to the strap decode below.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Use the software override strap if enabled, else the pin strap. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-to-media mapping differs between PCI functions 0 and 1. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6000
b6016b76
MC
6001static int __devinit
6002bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6003{
6004 struct bnx2 *bp;
6005 unsigned long mem_len;
6006 int rc;
6007 u32 reg;
40453c83 6008 u64 dma_mask, persist_dma_mask;
b6016b76
MC
6009
6010 SET_MODULE_OWNER(dev);
6011 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6012 bp = netdev_priv(dev);
b6016b76
MC
6013
6014 bp->flags = 0;
6015 bp->phy_flags = 0;
6016
6017 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6018 rc = pci_enable_device(pdev);
6019 if (rc) {
9b91cf9d 6020 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
6021 goto err_out;
6022 }
6023
6024 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6025 dev_err(&pdev->dev,
2e8a538d 6026 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6027 rc = -ENODEV;
6028 goto err_out_disable;
6029 }
6030
6031 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6032 if (rc) {
9b91cf9d 6033 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6034 goto err_out_disable;
6035 }
6036
6037 pci_set_master(pdev);
6038
6039 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6040 if (bp->pm_cap == 0) {
9b91cf9d 6041 dev_err(&pdev->dev,
2e8a538d 6042 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6043 rc = -EIO;
6044 goto err_out_release;
6045 }
6046
b6016b76
MC
6047 bp->dev = dev;
6048 bp->pdev = pdev;
6049
6050 spin_lock_init(&bp->phy_lock);
1b8227c4 6051 spin_lock_init(&bp->indirect_lock);
c4028958 6052 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6053
6054 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6055 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6056 dev->mem_end = dev->mem_start + mem_len;
6057 dev->irq = pdev->irq;
6058
6059 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6060
6061 if (!bp->regview) {
9b91cf9d 6062 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6063 rc = -ENOMEM;
6064 goto err_out_release;
6065 }
6066
6067 /* Configure byte swap and enable write to the reg_window registers.
6068 * Rely on CPU to do target byte swapping on big endian systems
6069 * The chip's target access swapping will not swap all accesses
6070 */
6071 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6072 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6073 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6074
829ca9a3 6075 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6076
6077 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6078
59b47d8a
MC
6079 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6080 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6081 if (bp->pcix_cap == 0) {
6082 dev_err(&pdev->dev,
6083 "Cannot find PCIX capability, aborting.\n");
6084 rc = -EIO;
6085 goto err_out_unmap;
6086 }
6087 }
6088
40453c83
MC
6089 /* 5708 cannot support DMA addresses > 40-bit. */
6090 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6091 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6092 else
6093 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6094
6095 /* Configure DMA attributes. */
6096 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6097 dev->features |= NETIF_F_HIGHDMA;
6098 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6099 if (rc) {
6100 dev_err(&pdev->dev,
6101 "pci_set_consistent_dma_mask failed, aborting.\n");
6102 goto err_out_unmap;
6103 }
6104 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6105 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6106 goto err_out_unmap;
6107 }
6108
b6016b76
MC
6109 /* Get bus information. */
6110 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6111 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6112 u32 clkreg;
6113
6114 bp->flags |= PCIX_FLAG;
6115
6116 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6aa20a22 6117
b6016b76
MC
6118 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6119 switch (clkreg) {
6120 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6121 bp->bus_speed_mhz = 133;
6122 break;
6123
6124 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6125 bp->bus_speed_mhz = 100;
6126 break;
6127
6128 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6129 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6130 bp->bus_speed_mhz = 66;
6131 break;
6132
6133 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6134 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6135 bp->bus_speed_mhz = 50;
6136 break;
6137
6138 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6139 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6140 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6141 bp->bus_speed_mhz = 33;
6142 break;
6143 }
6144 }
6145 else {
6146 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6147 bp->bus_speed_mhz = 66;
6148 else
6149 bp->bus_speed_mhz = 33;
6150 }
6151
6152 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6153 bp->flags |= PCI_32BIT_FLAG;
6154
6155 /* 5706A0 may falsely detect SERR and PERR. */
6156 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6157 reg = REG_RD(bp, PCI_COMMAND);
6158 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6159 REG_WR(bp, PCI_COMMAND, reg);
6160 }
6161 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6162 !(bp->flags & PCIX_FLAG)) {
6163
9b91cf9d 6164 dev_err(&pdev->dev,
2e8a538d 6165 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6166 goto err_out_unmap;
6167 }
6168
6169 bnx2_init_nvram(bp);
6170
e3648b3d
MC
6171 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6172
6173 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6174 BNX2_SHM_HDR_SIGNATURE_SIG) {
6175 u32 off = PCI_FUNC(pdev->devfn) << 2;
6176
6177 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6178 } else
e3648b3d
MC
6179 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6180
b6016b76
MC
6181 /* Get the permanent MAC address. First we need to make sure the
6182 * firmware is actually running.
6183 */
e3648b3d 6184 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6185
6186 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6187 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6188 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6189 rc = -ENODEV;
6190 goto err_out_unmap;
6191 }
6192
e3648b3d 6193 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 6194
e3648b3d 6195 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6196 bp->mac_addr[0] = (u8) (reg >> 8);
6197 bp->mac_addr[1] = (u8) reg;
6198
e3648b3d 6199 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6200 bp->mac_addr[2] = (u8) (reg >> 24);
6201 bp->mac_addr[3] = (u8) (reg >> 16);
6202 bp->mac_addr[4] = (u8) (reg >> 8);
6203 bp->mac_addr[5] = (u8) reg;
6204
6205 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6206 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6207
6208 bp->rx_csum = 1;
6209
6210 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6211
6212 bp->tx_quick_cons_trip_int = 20;
6213 bp->tx_quick_cons_trip = 20;
6214 bp->tx_ticks_int = 80;
6215 bp->tx_ticks = 80;
6aa20a22 6216
b6016b76
MC
6217 bp->rx_quick_cons_trip_int = 6;
6218 bp->rx_quick_cons_trip = 6;
6219 bp->rx_ticks_int = 18;
6220 bp->rx_ticks = 18;
6221
6222 bp->stats_ticks = 1000000 & 0xffff00;
6223
6224 bp->timer_interval = HZ;
cd339a0e 6225 bp->current_interval = HZ;
b6016b76 6226
5b0c76ad
MC
6227 bp->phy_addr = 1;
6228
b6016b76 6229 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6231 bnx2_get_5709_media(bp);
6232 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6233 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6
MC
6234
6235 if (bp->phy_flags & PHY_SERDES_FLAG) {
b6016b76 6236 bp->flags |= NO_WOL_FLAG;
bac0dff6 6237 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6238 bp->phy_addr = 2;
e3648b3d 6239 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
6240 BNX2_SHARED_HW_CFG_CONFIG);
6241 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6242 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6243 }
261dd5ca
MC
6244 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6245 CHIP_NUM(bp) == CHIP_NUM_5708)
6246 bp->phy_flags |= PHY_CRC_FIX_FLAG;
b659f44e
MC
6247 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6248 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6249
16088272
MC
6250 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6251 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6252 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
6253 bp->flags |= NO_WOL_FLAG;
6254
b6016b76
MC
6255 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6256 bp->tx_quick_cons_trip_int =
6257 bp->tx_quick_cons_trip;
6258 bp->tx_ticks_int = bp->tx_ticks;
6259 bp->rx_quick_cons_trip_int =
6260 bp->rx_quick_cons_trip;
6261 bp->rx_ticks_int = bp->rx_ticks;
6262 bp->comp_prod_trip_int = bp->comp_prod_trip;
6263 bp->com_ticks_int = bp->com_ticks;
6264 bp->cmd_ticks_int = bp->cmd_ticks;
6265 }
6266
f9317a40
MC
6267 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6268 *
6269 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6270 * with byte enables disabled on the unused 32-bit word. This is legal
6271 * but causes problems on the AMD 8132 which will eventually stop
6272 * responding after a while.
6273 *
6274 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6275 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6276 */
6277 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6278 struct pci_dev *amd_8132 = NULL;
6279
6280 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6281 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6282 amd_8132))) {
6283 u8 rev;
6284
6285 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6286 if (rev >= 0x10 && rev <= 0x13) {
6287 disable_msi = 1;
6288 pci_dev_put(amd_8132);
6289 break;
6290 }
6291 }
6292 }
6293
b6016b76
MC
6294 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6295 bp->req_line_speed = 0;
6296 if (bp->phy_flags & PHY_SERDES_FLAG) {
6297 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
cd339a0e 6298
e3648b3d 6299 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
cd339a0e
MC
6300 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6301 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6302 bp->autoneg = 0;
6303 bp->req_line_speed = bp->line_speed = SPEED_1000;
6304 bp->req_duplex = DUPLEX_FULL;
6305 }
b6016b76
MC
6306 }
6307 else {
6308 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6309 }
6310
6311 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6312
cd339a0e
MC
6313 init_timer(&bp->timer);
6314 bp->timer.expires = RUN_AT(bp->timer_interval);
6315 bp->timer.data = (unsigned long) bp;
6316 bp->timer.function = bnx2_timer;
6317
b6016b76
MC
6318 return 0;
6319
6320err_out_unmap:
6321 if (bp->regview) {
6322 iounmap(bp->regview);
73eef4cd 6323 bp->regview = NULL;
b6016b76
MC
6324 }
6325
6326err_out_release:
6327 pci_release_regions(pdev);
6328
6329err_out_disable:
6330 pci_disable_device(pdev);
6331 pci_set_drvdata(pdev, NULL);
6332
6333err_out:
6334 return rc;
6335}
6336
/* PCI probe entry point: allocate the net_device, run board setup,
 * wire up net_device methods and feature flags, and register the
 * interface.  Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	/* Print the driver banner only for the first device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from firmware by bnx2_init_board. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* 5709 can do generic HW checksumming; older chips IP only. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		/* Undo everything bnx2_init_board acquired. */
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6431
/* PCI remove entry point: tear down in the reverse order of probe.
 * Pending reset work must be flushed before the netdev disappears.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6450
/* PM suspend hook: quiesce the device, tell firmware why we are going
 * down (WOL vs. no-WOL), free buffers and drop to the target PCI
 * power state.  A device that is not up needs no work.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset reason based on WOL capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6477
/* PM resume hook: restore PCI state and power, then reinitialize the
 * NIC and restart traffic.  Mirrors bnx2_suspend.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6494
/* PCI driver descriptor binding the bnx2 entry points to the device
 * IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6503
/* Module init: register the PCI driver. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6508
/* Module exit: unregister the PCI driver (removes all bound devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6513
/* Hook module load/unload to driver registration. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6516
6517
6518