[BNX2]: Restructure RX fast path handling.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
b3448b0b
DV
55#define FW_BUF_SIZE 0x8000
56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
bbe42974
MC
59#define DRV_MODULE_VERSION "1.6.9"
60#define DRV_MODULE_RELDATE "December 8, 2007"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; used as indices into board_info[] below, and as
 * driver_data in the PCI device table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* Human-readable adapter names, indexed by board_t, above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
107static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
126 { 0, }
127};
128
129static struct flash_spec flash_table[] =
130{
e30372c9
MC
131#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 133 /* Slow EEPROM */
37137709 134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
37137709
MC
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
b6016b76
MC
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
37137709 145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
37137709 151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
37137709
MC
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
b6016b76
MC
216};
217
e30372c9
MC
218static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225};
226
b6016b76
MC
227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
e89bbf10
MC
229static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230{
2f8af120 231 u32 diff;
e89bbf10 232
2f8af120 233 smp_mb();
faac9c4b
MC
234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
e89bbf10
MC
244 return (bp->tx_ring_size - diff);
245}
246
b6016b76
MC
247static u32
248bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249{
1b8227c4
MC
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
b6016b76 253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
b6016b76
MC
257}
258
259static void
260bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261{
1b8227c4 262 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 265 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
266}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
293static int
294bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295{
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348}
349
350static int
351bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352{
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 370
b6016b76
MC
371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397}
398
399static void
400bnx2_disable_int(struct bnx2 *bp)
401{
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
405}
406
407static void
408bnx2_enable_int(struct bnx2 *bp)
409{
1269a8a6
MC
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
413
b6016b76
MC
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
416
bf5295bb 417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
418}
419
420static void
421bnx2_disable_int_sync(struct bnx2 *bp)
422{
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
426}
427
428static void
429bnx2_netif_stop(struct bnx2 *bp)
430{
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
bea3348e 433 napi_disable(&bp->napi);
b6016b76
MC
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
436 }
437}
438
439static void
440bnx2_netif_start(struct bnx2 *bp)
441{
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
bea3348e 445 napi_enable(&bp->napi);
b6016b76
MC
446 bnx2_enable_int(bp);
447 }
448 }
449}
450
451static void
452bnx2_free_mem(struct bnx2 *bp)
453{
13daffa2
MC
454 int i;
455
59b47d8a
MC
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459 bp->ctx_blk[i],
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
462 }
463 }
b6016b76 464 if (bp->status_blk) {
0f31f994 465 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
0f31f994 468 bp->stats_blk = NULL;
b6016b76
MC
469 }
470 if (bp->tx_desc_ring) {
e343d55c 471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
474 }
b4558ea9
JJ
475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
13daffa2
MC
477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
e343d55c 479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
480 bp->rx_desc_ring[i],
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
483 }
484 vfree(bp->rx_buf_ring);
b4558ea9 485 bp->rx_buf_ring = NULL;
b6016b76
MC
486}
487
488static int
489bnx2_alloc_mem(struct bnx2 *bp)
490{
0f31f994 491 int i, status_blk_size;
13daffa2 492
e343d55c 493 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
b6016b76
MC
494 if (bp->tx_buf_ring == NULL)
495 return -ENOMEM;
496
e343d55c 497 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
498 &bp->tx_desc_mapping);
499 if (bp->tx_desc_ring == NULL)
500 goto alloc_mem_err;
501
e343d55c 502 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
b6016b76
MC
503 if (bp->rx_buf_ring == NULL)
504 goto alloc_mem_err;
505
e343d55c 506 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
13daffa2
MC
507
508 for (i = 0; i < bp->rx_max_ring; i++) {
509 bp->rx_desc_ring[i] =
e343d55c 510 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
511 &bp->rx_desc_mapping[i]);
512 if (bp->rx_desc_ring[i] == NULL)
513 goto alloc_mem_err;
514
515 }
b6016b76 516
0f31f994
MC
517 /* Combine status and statistics blocks into one allocation. */
518 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
519 bp->status_stats_size = status_blk_size +
520 sizeof(struct statistics_block);
521
522 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
523 &bp->status_blk_mapping);
524 if (bp->status_blk == NULL)
525 goto alloc_mem_err;
526
0f31f994 527 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 528
0f31f994
MC
529 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
530 status_blk_size);
b6016b76 531
0f31f994 532 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 533
59b47d8a
MC
534 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
535 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
536 if (bp->ctx_pages == 0)
537 bp->ctx_pages = 1;
538 for (i = 0; i < bp->ctx_pages; i++) {
539 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
540 BCM_PAGE_SIZE,
541 &bp->ctx_blk_mapping[i]);
542 if (bp->ctx_blk[i] == NULL)
543 goto alloc_mem_err;
544 }
545 }
b6016b76
MC
546 return 0;
547
548alloc_mem_err:
549 bnx2_free_mem(bp);
550 return -ENOMEM;
551}
552
e3648b3d
MC
553static void
554bnx2_report_fw_link(struct bnx2 *bp)
555{
556 u32 fw_link_status = 0;
557
0d8a6571
MC
558 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559 return;
560
e3648b3d
MC
561 if (bp->link_up) {
562 u32 bmsr;
563
564 switch (bp->line_speed) {
565 case SPEED_10:
566 if (bp->duplex == DUPLEX_HALF)
567 fw_link_status = BNX2_LINK_STATUS_10HALF;
568 else
569 fw_link_status = BNX2_LINK_STATUS_10FULL;
570 break;
571 case SPEED_100:
572 if (bp->duplex == DUPLEX_HALF)
573 fw_link_status = BNX2_LINK_STATUS_100HALF;
574 else
575 fw_link_status = BNX2_LINK_STATUS_100FULL;
576 break;
577 case SPEED_1000:
578 if (bp->duplex == DUPLEX_HALF)
579 fw_link_status = BNX2_LINK_STATUS_1000HALF;
580 else
581 fw_link_status = BNX2_LINK_STATUS_1000FULL;
582 break;
583 case SPEED_2500:
584 if (bp->duplex == DUPLEX_HALF)
585 fw_link_status = BNX2_LINK_STATUS_2500HALF;
586 else
587 fw_link_status = BNX2_LINK_STATUS_2500FULL;
588 break;
589 }
590
591 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
592
593 if (bp->autoneg) {
594 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
595
ca58c3af
MC
596 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
597 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
598
599 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
600 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
601 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
602 else
603 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
604 }
605 }
606 else
607 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
608
609 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
610}
611
9b1084b8
MC
612static char *
613bnx2_xceiver_str(struct bnx2 *bp)
614{
615 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
617 "Copper"));
618}
619
b6016b76
MC
620static void
621bnx2_report_link(struct bnx2 *bp)
622{
623 if (bp->link_up) {
624 netif_carrier_on(bp->dev);
9b1084b8
MC
625 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
626 bnx2_xceiver_str(bp));
b6016b76
MC
627
628 printk("%d Mbps ", bp->line_speed);
629
630 if (bp->duplex == DUPLEX_FULL)
631 printk("full duplex");
632 else
633 printk("half duplex");
634
635 if (bp->flow_ctrl) {
636 if (bp->flow_ctrl & FLOW_CTRL_RX) {
637 printk(", receive ");
638 if (bp->flow_ctrl & FLOW_CTRL_TX)
639 printk("& transmit ");
640 }
641 else {
642 printk(", transmit ");
643 }
644 printk("flow control ON");
645 }
646 printk("\n");
647 }
648 else {
649 netif_carrier_off(bp->dev);
9b1084b8
MC
650 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
651 bnx2_xceiver_str(bp));
b6016b76 652 }
e3648b3d
MC
653
654 bnx2_report_fw_link(bp);
b6016b76
MC
655}
656
657static void
658bnx2_resolve_flow_ctrl(struct bnx2 *bp)
659{
660 u32 local_adv, remote_adv;
661
662 bp->flow_ctrl = 0;
6aa20a22 663 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
664 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
665
666 if (bp->duplex == DUPLEX_FULL) {
667 bp->flow_ctrl = bp->req_flow_ctrl;
668 }
669 return;
670 }
671
672 if (bp->duplex != DUPLEX_FULL) {
673 return;
674 }
675
5b0c76ad
MC
676 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
677 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
678 u32 val;
679
680 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
681 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
682 bp->flow_ctrl |= FLOW_CTRL_TX;
683 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
684 bp->flow_ctrl |= FLOW_CTRL_RX;
685 return;
686 }
687
ca58c3af
MC
688 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
689 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
690
691 if (bp->phy_flags & PHY_SERDES_FLAG) {
692 u32 new_local_adv = 0;
693 u32 new_remote_adv = 0;
694
695 if (local_adv & ADVERTISE_1000XPAUSE)
696 new_local_adv |= ADVERTISE_PAUSE_CAP;
697 if (local_adv & ADVERTISE_1000XPSE_ASYM)
698 new_local_adv |= ADVERTISE_PAUSE_ASYM;
699 if (remote_adv & ADVERTISE_1000XPAUSE)
700 new_remote_adv |= ADVERTISE_PAUSE_CAP;
701 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
702 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
703
704 local_adv = new_local_adv;
705 remote_adv = new_remote_adv;
706 }
707
708 /* See Table 28B-3 of 802.3ab-1999 spec. */
709 if (local_adv & ADVERTISE_PAUSE_CAP) {
710 if(local_adv & ADVERTISE_PAUSE_ASYM) {
711 if (remote_adv & ADVERTISE_PAUSE_CAP) {
712 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
713 }
714 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
715 bp->flow_ctrl = FLOW_CTRL_RX;
716 }
717 }
718 else {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
721 }
722 }
723 }
724 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
725 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
726 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
727
728 bp->flow_ctrl = FLOW_CTRL_TX;
729 }
730 }
731}
732
27a005b8
MC
733static int
734bnx2_5709s_linkup(struct bnx2 *bp)
735{
736 u32 val, speed;
737
738 bp->link_up = 1;
739
740 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
743
744 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745 bp->line_speed = bp->req_line_speed;
746 bp->duplex = bp->req_duplex;
747 return 0;
748 }
749 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
750 switch (speed) {
751 case MII_BNX2_GP_TOP_AN_SPEED_10:
752 bp->line_speed = SPEED_10;
753 break;
754 case MII_BNX2_GP_TOP_AN_SPEED_100:
755 bp->line_speed = SPEED_100;
756 break;
757 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759 bp->line_speed = SPEED_1000;
760 break;
761 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762 bp->line_speed = SPEED_2500;
763 break;
764 }
765 if (val & MII_BNX2_GP_TOP_AN_FD)
766 bp->duplex = DUPLEX_FULL;
767 else
768 bp->duplex = DUPLEX_HALF;
769 return 0;
770}
771
b6016b76 772static int
5b0c76ad
MC
773bnx2_5708s_linkup(struct bnx2 *bp)
774{
775 u32 val;
776
777 bp->link_up = 1;
778 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780 case BCM5708S_1000X_STAT1_SPEED_10:
781 bp->line_speed = SPEED_10;
782 break;
783 case BCM5708S_1000X_STAT1_SPEED_100:
784 bp->line_speed = SPEED_100;
785 break;
786 case BCM5708S_1000X_STAT1_SPEED_1G:
787 bp->line_speed = SPEED_1000;
788 break;
789 case BCM5708S_1000X_STAT1_SPEED_2G5:
790 bp->line_speed = SPEED_2500;
791 break;
792 }
793 if (val & BCM5708S_1000X_STAT1_FD)
794 bp->duplex = DUPLEX_FULL;
795 else
796 bp->duplex = DUPLEX_HALF;
797
798 return 0;
799}
800
801static int
802bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
803{
804 u32 bmcr, local_adv, remote_adv, common;
805
806 bp->link_up = 1;
807 bp->line_speed = SPEED_1000;
808
ca58c3af 809 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
810 if (bmcr & BMCR_FULLDPLX) {
811 bp->duplex = DUPLEX_FULL;
812 }
813 else {
814 bp->duplex = DUPLEX_HALF;
815 }
816
817 if (!(bmcr & BMCR_ANENABLE)) {
818 return 0;
819 }
820
ca58c3af
MC
821 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
823
824 common = local_adv & remote_adv;
825 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
826
827 if (common & ADVERTISE_1000XFULL) {
828 bp->duplex = DUPLEX_FULL;
829 }
830 else {
831 bp->duplex = DUPLEX_HALF;
832 }
833 }
834
835 return 0;
836}
837
838static int
839bnx2_copper_linkup(struct bnx2 *bp)
840{
841 u32 bmcr;
842
ca58c3af 843 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
844 if (bmcr & BMCR_ANENABLE) {
845 u32 local_adv, remote_adv, common;
846
847 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
849
850 common = local_adv & (remote_adv >> 2);
851 if (common & ADVERTISE_1000FULL) {
852 bp->line_speed = SPEED_1000;
853 bp->duplex = DUPLEX_FULL;
854 }
855 else if (common & ADVERTISE_1000HALF) {
856 bp->line_speed = SPEED_1000;
857 bp->duplex = DUPLEX_HALF;
858 }
859 else {
ca58c3af
MC
860 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
862
863 common = local_adv & remote_adv;
864 if (common & ADVERTISE_100FULL) {
865 bp->line_speed = SPEED_100;
866 bp->duplex = DUPLEX_FULL;
867 }
868 else if (common & ADVERTISE_100HALF) {
869 bp->line_speed = SPEED_100;
870 bp->duplex = DUPLEX_HALF;
871 }
872 else if (common & ADVERTISE_10FULL) {
873 bp->line_speed = SPEED_10;
874 bp->duplex = DUPLEX_FULL;
875 }
876 else if (common & ADVERTISE_10HALF) {
877 bp->line_speed = SPEED_10;
878 bp->duplex = DUPLEX_HALF;
879 }
880 else {
881 bp->line_speed = 0;
882 bp->link_up = 0;
883 }
884 }
885 }
886 else {
887 if (bmcr & BMCR_SPEED100) {
888 bp->line_speed = SPEED_100;
889 }
890 else {
891 bp->line_speed = SPEED_10;
892 }
893 if (bmcr & BMCR_FULLDPLX) {
894 bp->duplex = DUPLEX_FULL;
895 }
896 else {
897 bp->duplex = DUPLEX_HALF;
898 }
899 }
900
901 return 0;
902}
903
904static int
905bnx2_set_mac_link(struct bnx2 *bp)
906{
907 u32 val;
908
909 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
910 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
911 (bp->duplex == DUPLEX_HALF)) {
912 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
913 }
914
915 /* Configure the EMAC mode register. */
916 val = REG_RD(bp, BNX2_EMAC_MODE);
917
918 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 919 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 920 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
921
922 if (bp->link_up) {
5b0c76ad
MC
923 switch (bp->line_speed) {
924 case SPEED_10:
59b47d8a
MC
925 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
926 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
927 break;
928 }
929 /* fall through */
930 case SPEED_100:
931 val |= BNX2_EMAC_MODE_PORT_MII;
932 break;
933 case SPEED_2500:
59b47d8a 934 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
935 /* fall through */
936 case SPEED_1000:
937 val |= BNX2_EMAC_MODE_PORT_GMII;
938 break;
939 }
b6016b76
MC
940 }
941 else {
942 val |= BNX2_EMAC_MODE_PORT_GMII;
943 }
944
945 /* Set the MAC to operate in the appropriate duplex mode. */
946 if (bp->duplex == DUPLEX_HALF)
947 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
948 REG_WR(bp, BNX2_EMAC_MODE, val);
949
950 /* Enable/disable rx PAUSE. */
951 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
952
953 if (bp->flow_ctrl & FLOW_CTRL_RX)
954 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
955 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
956
957 /* Enable/disable tx PAUSE. */
958 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
959 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
960
961 if (bp->flow_ctrl & FLOW_CTRL_TX)
962 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
964
965 /* Acknowledge the interrupt. */
966 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
967
968 return 0;
969}
970
27a005b8
MC
971static void
972bnx2_enable_bmsr1(struct bnx2 *bp)
973{
974 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975 (CHIP_NUM(bp) == CHIP_NUM_5709))
976 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977 MII_BNX2_BLK_ADDR_GP_STATUS);
978}
979
980static void
981bnx2_disable_bmsr1(struct bnx2 *bp)
982{
983 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984 (CHIP_NUM(bp) == CHIP_NUM_5709))
985 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
987}
988
605a9e20
MC
989static int
990bnx2_test_and_enable_2g5(struct bnx2 *bp)
991{
992 u32 up1;
993 int ret = 1;
994
995 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
996 return 0;
997
998 if (bp->autoneg & AUTONEG_SPEED)
999 bp->advertising |= ADVERTISED_2500baseX_Full;
1000
27a005b8
MC
1001 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1002 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1003
605a9e20
MC
1004 bnx2_read_phy(bp, bp->mii_up1, &up1);
1005 if (!(up1 & BCM5708S_UP1_2G5)) {
1006 up1 |= BCM5708S_UP1_2G5;
1007 bnx2_write_phy(bp, bp->mii_up1, up1);
1008 ret = 0;
1009 }
1010
27a005b8
MC
1011 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1012 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1013 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014
605a9e20
MC
1015 return ret;
1016}
1017
1018static int
1019bnx2_test_and_disable_2g5(struct bnx2 *bp)
1020{
1021 u32 up1;
1022 int ret = 0;
1023
1024 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025 return 0;
1026
27a005b8
MC
1027 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1028 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1029
605a9e20
MC
1030 bnx2_read_phy(bp, bp->mii_up1, &up1);
1031 if (up1 & BCM5708S_UP1_2G5) {
1032 up1 &= ~BCM5708S_UP1_2G5;
1033 bnx2_write_phy(bp, bp->mii_up1, up1);
1034 ret = 1;
1035 }
1036
27a005b8
MC
1037 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1038 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1039 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040
605a9e20
MC
1041 return ret;
1042}
1043
1044static void
1045bnx2_enable_forced_2g5(struct bnx2 *bp)
1046{
1047 u32 bmcr;
1048
1049 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1050 return;
1051
27a005b8
MC
1052 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1053 u32 val;
1054
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_SERDES_DIG);
1057 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1061
1062 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1065
1066 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1067 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068 bmcr |= BCM5708S_BMCR_FORCE_2500;
1069 }
1070
1071 if (bp->autoneg & AUTONEG_SPEED) {
1072 bmcr &= ~BMCR_ANENABLE;
1073 if (bp->req_duplex == DUPLEX_FULL)
1074 bmcr |= BMCR_FULLDPLX;
1075 }
1076 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1077}
1078
1079static void
1080bnx2_disable_forced_2g5(struct bnx2 *bp)
1081{
1082 u32 bmcr;
1083
1084 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1085 return;
1086
27a005b8
MC
1087 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1088 u32 val;
1089
1090 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091 MII_BNX2_BLK_ADDR_SERDES_DIG);
1092 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1095
1096 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1099
1100 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1101 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1103 }
1104
1105 if (bp->autoneg & AUTONEG_SPEED)
1106 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1108}
1109
b6016b76
MC
/* Poll the PHY and synchronize the driver and MAC with the current
 * link state: reads BMSR link status, runs the chip-specific link-up
 * handler, resolves flow control and reprograms the MAC.  Reports a
 * link change via bnx2_report_link().  Always returns 0.  Loopback
 * modes and firmware-managed (remote) PHYs are handled up front.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Link state of a remote PHY arrives via firmware events. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latched, so the first
	 * read clears a stale latched value (standard MII behavior).
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, override the BMSR link bit with the EMAC
	 * status register's view of the link.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* Chip-specific SerDes link-up handling. */
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link is down: drop any forced 2.5G setting so that
		 * autonegotiation can start from a clean state.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1175
1176static int
1177bnx2_reset_phy(struct bnx2 *bp)
1178{
1179 int i;
1180 u32 reg;
1181
ca58c3af 1182 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1183
1184#define PHY_RESET_MAX_WAIT 100
1185 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1186 udelay(10);
1187
ca58c3af 1188 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1189 if (!(reg & BMCR_RESET)) {
1190 udelay(20);
1191 break;
1192 }
1193 }
1194 if (i == PHY_RESET_MAX_WAIT) {
1195 return -EBUSY;
1196 }
1197 return 0;
1198}
1199
1200static u32
1201bnx2_phy_get_pause_adv(struct bnx2 *bp)
1202{
1203 u32 adv = 0;
1204
1205 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1207
1208 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209 adv = ADVERTISE_1000XPAUSE;
1210 }
1211 else {
1212 adv = ADVERTISE_PAUSE_CAP;
1213 }
1214 }
1215 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPSE_ASYM;
1218 }
1219 else {
1220 adv = ADVERTISE_PAUSE_ASYM;
1221 }
1222 }
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1226 }
1227 else {
1228 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229 }
1230 }
1231 return adv;
1232}
1233
0d8a6571
MC
1234static int bnx2_fw_sync(struct bnx2 *, u32, int);
1235
b6016b76 1236static int
0d8a6571
MC
1237bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1238{
1239 u32 speed_arg = 0, pause_adv;
1240
1241 pause_adv = bnx2_phy_get_pause_adv(bp);
1242
1243 if (bp->autoneg & AUTONEG_SPEED) {
1244 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245 if (bp->advertising & ADVERTISED_10baseT_Half)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247 if (bp->advertising & ADVERTISED_10baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249 if (bp->advertising & ADVERTISED_100baseT_Half)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251 if (bp->advertising & ADVERTISED_100baseT_Full)
1252 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1257 } else {
1258 if (bp->req_line_speed == SPEED_2500)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260 else if (bp->req_line_speed == SPEED_1000)
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262 else if (bp->req_line_speed == SPEED_100) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267 } else if (bp->req_line_speed == SPEED_10) {
1268 if (bp->req_duplex == DUPLEX_FULL)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1270 else
1271 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1272 }
1273 }
1274
1275 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1278 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1279
1280 if (port == PORT_TP)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1283
1284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1285
1286 spin_unlock_bh(&bp->phy_lock);
1287 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288 spin_lock_bh(&bp->phy_lock);
1289
1290 return 0;
1291}
1292
/* Configure a SerDes PHY according to the requested settings.
 * Remote-PHY devices are delegated to the firmware.  In forced-speed
 * mode the 2.5G/1G forcing is chip specific; in autoneg mode the
 * advertisement is reprogrammed and autonegotiation restarted with a
 * shortened timer to recover quickly from non-autonegotiating
 * partners.  Called with phy_lock held (dropped around msleep).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G advertisement had to change, bounce the
		 * link so the partner sees the new configuration.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1407
1408#define ETHTOOL_ALL_FIBRE_SPEED \
deaf391b
MC
1409 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1410 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1411 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1412
1413#define ETHTOOL_ALL_COPPER_SPEED \
1414 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1415 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1416 ADVERTISED_1000baseT_Full)
1417
1418#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1419 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1420
b6016b76
MC
1421#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1422
0d8a6571
MC
/* Derive the driver's default link settings from the firmware-managed
 * (remote PHY) configuration in shared memory.  Reads the copper or
 * SerDes default-link word depending on the active port type and
 * translates it into autoneg/advertising or forced speed/duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: map each firmware speed bit to the ethtool
		 * advertising mask.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: the highest speed bit present wins; half
		 * duplex is only possible at 10/100.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1469
deaf391b
MC
/* Establish the driver's default link parameters.  Remote-PHY capable
 * devices defer to the firmware settings; otherwise default to full
 * autonegotiation, except when the port hardware config forces 1G on
 * a SerDes port.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			/* HW config mandates forced 1G full duplex. */
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1493
df149d70
MC
1494static void
1495bnx2_send_heart_beat(struct bnx2 *bp)
1496{
1497 u32 msg;
1498 u32 addr;
1499
1500 spin_lock(&bp->indirect_lock);
1501 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1502 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1503 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1504 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1505 spin_unlock(&bp->indirect_lock);
1506}
1507
0d8a6571
MC
/* Handle a link event reported by the firmware-managed (remote) PHY.
 * Decodes the link-status word from shared memory into link state,
 * speed, duplex and flow control, answers heartbeat requests, and
 * re-derives the default link setup if the port type changed.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each half-duplex case sets the duplex, then falls
		 * through to pick up the shared line-speed assignment.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was forced by the user. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Take the autonegotiated result from firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1586
1587static int
1588bnx2_set_remote_link(struct bnx2 *bp)
1589{
1590 u32 evt_code;
1591
1592 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1593 switch (evt_code) {
1594 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595 bnx2_remote_phy_event(bp);
1596 break;
1597 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1598 default:
df149d70 1599 bnx2_send_heart_beat(bp);
0d8a6571
MC
1600 break;
1601 }
1602 return 0;
1603}
1604
b6016b76
MC
/* Configure a copper PHY according to the requested settings.  In
 * autoneg mode the 10/100 and 1000 advertisement registers are
 * reprogrammed and autonegotiation restarted only if something
 * changed; in forced mode the link is bounced if BMCR must change.
 * Called with phy_lock held (dropped around msleep).  Always
 * returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits we manage when comparing old vs new. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement actually
		 * changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path (10/100 only on copper here). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR is read twice: the link-status bit is latched. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1701
1702static int
0d8a6571 1703bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1704{
1705 if (bp->loopback == MAC_LOOPBACK)
1706 return 0;
1707
1708 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1709 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1710 }
1711 else {
1712 return (bnx2_setup_copper_phy(bp));
1713 }
1714}
1715
27a005b8
MC
/* One-time initialization of the 5709 SerDes PHY.  The 5709 places
 * the standard MII registers at a 0x10 offset and accesses extended
 * registers through block-address selection; this sets up the mii_*
 * register offsets, resets the PHY and programs fiber, 2.5G, and
 * next-page/BAM settings.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Select fiber mode, disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the PHY is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the default register block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1764
/* One-time initialization of the 5708 SerDes PHY: reset, fiber mode
 * with media auto-detect, PLL early-link detect, optional 2.5G
 * advertisement, an A0/B0/B1 TX-amplitude workaround, and backplane
 * TX control taken from the hardware config in shared memory.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early silicon revisions need a TX amplitude tweak. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the board's TXCTL3 value, but only for backplane PHYs
	 * as indicated by the shared hardware config.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1822
/* One-time initialization of the 5706 SerDes PHY: reset, then program
 * vendor registers 0x18/0x1c depending on whether jumbo frames are in
 * use.  (0x18/0x1c are vendor-specific shadow registers; exact bit
 * semantics are undocumented here — values come from Broadcom.)
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1859
/* One-time initialization of a copper PHY: reset, apply the CRC and
 * early-DAC workarounds when flagged, set/clear the extended packet
 * length bits for jumbo frames, and enable ethernet@wirespeed.
 * (Registers 0x10/0x15/0x17/0x18 are vendor-specific; the magic
 * values come from Broadcom.)  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Vendor-provided register sequence for the CRC workaround. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via the DSP expansion register when flagged. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length settings. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1910
1911
/* Top-level PHY initialization.  Sets the default MII register
 * offsets (the 5709 SerDes init overrides them), enables link
 * attention, reads the PHY id, runs the chip-specific init routine,
 * then applies the current link settings via bnx2_setup_phy().
 * Firmware-managed (remote) PHYs skip straight to setup.  Returns 0
 * or a negative error from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default register layout; bnx2_init_5709s_phy() overrides. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1955
1956static int
1957bnx2_set_mac_loopback(struct bnx2 *bp)
1958{
1959 u32 mac_mode;
1960
1961 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965 bp->link_up = 1;
1966 return 0;
1967}
1968
bc5a0690
MC
1969static int bnx2_test_link(struct bnx2 *);
1970
/* Put the PHY into loopback at 1000/full, wait up to ~1 s for the
 * link to settle, then configure the EMAC for GMII with loopback and
 * force-link bits cleared.  Marks the link up.  Returns 0 on success
 * or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up, 10 x 100 ms; proceed regardless after that. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2000
/* Send a command to the firmware through the shared-memory driver
 * mailbox and wait for it to be acknowledged.  msg_data carries the
 * command code; a sequence number is or'ed in so the ack can be
 * matched.  WAIT0-class messages return immediately after posting.
 * On timeout a FW_TIMEOUT code is posted and -EBUSY returned (the
 * error message is suppressed when @silent is set); a non-OK status
 * from the firmware yields -EIO.  Sleeps — must not be called in
 * atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Fire-and-forget message class: don't check the ack result. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2043
59b47d8a
MC
/* Initialize the 5709 host-paged context memory: start the context
 * memory init, wait for it to complete, then program each host page
 * into the page table, polling for each write to be accepted.
 * Returns 0 on success or -EBUSY if the hardware does not respond.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to 10 x 2 us) for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low DMA address + valid bit, then the high 32 bits. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2086
b6016b76
MC
/* Zero-initialize the on-chip context memory for all 96 connection
 * ids (pre-5709 chips).  On 5706 A0 some context ids are remapped to
 * work around bad regions.  Each context is cleared one physical page
 * at a time through the context window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* A0 workaround: remap a subset of context ids. */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2132
/* Work around bad RX buffer memory blocks: allocate every free mbuf
 * from the chip's internal pool, remember the good ones (bit 9 clear),
 * and free only those back — leaving the bad blocks permanently
 * allocated so the hardware never uses them.  Returns 0 on success or
 * -ENOMEM if the temporary tracking array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2183
2184static void
6aa20a22 2185bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2186{
2187 u32 val;
2188 u8 *mac_addr = bp->dev->dev_addr;
2189
2190 val = (mac_addr[0] << 8) | mac_addr[1];
2191
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2193
6aa20a22 2194 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2195 (mac_addr[4] << 8) | mac_addr[5];
2196
2197 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2198}
2199
2200static inline int
2201bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2202{
2203 struct sk_buff *skb;
2204 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2205 dma_addr_t mapping;
13daffa2 2206 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
2207 unsigned long align;
2208
932f3772 2209 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
b6016b76
MC
2210 if (skb == NULL) {
2211 return -ENOMEM;
2212 }
2213
59b47d8a
MC
2214 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2215 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 2216
b6016b76
MC
2217 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2218 PCI_DMA_FROMDEVICE);
2219
2220 rx_buf->skb = skb;
2221 pci_unmap_addr_set(rx_buf, mapping, mapping);
2222
2223 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2224 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2225
2226 bp->rx_prod_bseq += bp->rx_buf_use_size;
2227
2228 return 0;
2229}
2230
da3e4fbe
MC
2231static int
2232bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2233{
da3e4fbe 2234 struct status_block *sblk = bp->status_blk;
b6016b76 2235 u32 new_link_state, old_link_state;
da3e4fbe 2236 int is_set = 1;
b6016b76 2237
da3e4fbe
MC
2238 new_link_state = sblk->status_attn_bits & event;
2239 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2240 if (new_link_state != old_link_state) {
da3e4fbe
MC
2241 if (new_link_state)
2242 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2243 else
2244 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2245 } else
2246 is_set = 0;
2247
2248 return is_set;
2249}
2250
2251static void
2252bnx2_phy_int(struct bnx2 *bp)
2253{
2254 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2255 spin_lock(&bp->phy_lock);
b6016b76 2256 bnx2_set_link(bp);
da3e4fbe 2257 spin_unlock(&bp->phy_lock);
b6016b76 2258 }
0d8a6571
MC
2259 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2260 bnx2_set_remote_link(bp);
2261
b6016b76
MC
2262}
2263
2264static void
2265bnx2_tx_int(struct bnx2 *bp)
2266{
f4e418f7 2267 struct status_block *sblk = bp->status_blk;
b6016b76
MC
2268 u16 hw_cons, sw_cons, sw_ring_cons;
2269 int tx_free_bd = 0;
2270
f4e418f7 2271 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
b6016b76
MC
2272 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2273 hw_cons++;
2274 }
2275 sw_cons = bp->tx_cons;
2276
2277 while (sw_cons != hw_cons) {
2278 struct sw_bd *tx_buf;
2279 struct sk_buff *skb;
2280 int i, last;
2281
2282 sw_ring_cons = TX_RING_IDX(sw_cons);
2283
2284 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2285 skb = tx_buf->skb;
1d39ed56 2286
b6016b76 2287 /* partial BD completions possible with TSO packets */
89114afd 2288 if (skb_is_gso(skb)) {
b6016b76
MC
2289 u16 last_idx, last_ring_idx;
2290
2291 last_idx = sw_cons +
2292 skb_shinfo(skb)->nr_frags + 1;
2293 last_ring_idx = sw_ring_cons +
2294 skb_shinfo(skb)->nr_frags + 1;
2295 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2296 last_idx++;
2297 }
2298 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2299 break;
2300 }
2301 }
1d39ed56 2302
b6016b76
MC
2303 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2304 skb_headlen(skb), PCI_DMA_TODEVICE);
2305
2306 tx_buf->skb = NULL;
2307 last = skb_shinfo(skb)->nr_frags;
2308
2309 for (i = 0; i < last; i++) {
2310 sw_cons = NEXT_TX_BD(sw_cons);
2311
2312 pci_unmap_page(bp->pdev,
2313 pci_unmap_addr(
2314 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2315 mapping),
2316 skb_shinfo(skb)->frags[i].size,
2317 PCI_DMA_TODEVICE);
2318 }
2319
2320 sw_cons = NEXT_TX_BD(sw_cons);
2321
2322 tx_free_bd += last + 1;
2323
745720e5 2324 dev_kfree_skb(skb);
b6016b76 2325
f4e418f7
MC
2326 hw_cons = bp->hw_tx_cons =
2327 sblk->status_tx_quick_consumer_index0;
2328
b6016b76
MC
2329 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2330 hw_cons++;
2331 }
2332 }
2333
e89bbf10 2334 bp->tx_cons = sw_cons;
2f8af120
MC
2335 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2336 * before checking for netif_queue_stopped(). Without the
2337 * memory barrier, there is a small possibility that bnx2_start_xmit()
2338 * will miss it and cause the queue to be stopped forever.
2339 */
2340 smp_mb();
b6016b76 2341
2f8af120
MC
2342 if (unlikely(netif_queue_stopped(bp->dev)) &&
2343 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2344 netif_tx_lock(bp->dev);
b6016b76 2345 if ((netif_queue_stopped(bp->dev)) &&
2f8af120 2346 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
b6016b76 2347 netif_wake_queue(bp->dev);
2f8af120 2348 netif_tx_unlock(bp->dev);
b6016b76 2349 }
b6016b76
MC
2350}
2351
2352static inline void
2353bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2354 u16 cons, u16 prod)
2355{
236b6394
MC
2356 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2357 struct rx_bd *cons_bd, *prod_bd;
2358
2359 cons_rx_buf = &bp->rx_buf_ring[cons];
2360 prod_rx_buf = &bp->rx_buf_ring[prod];
b6016b76
MC
2361
2362 pci_dma_sync_single_for_device(bp->pdev,
2363 pci_unmap_addr(cons_rx_buf, mapping),
2364 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2365
236b6394 2366 bp->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2367
236b6394 2368 prod_rx_buf->skb = skb;
b6016b76 2369
236b6394
MC
2370 if (cons == prod)
2371 return;
b6016b76 2372
236b6394
MC
2373 pci_unmap_addr_set(prod_rx_buf, mapping,
2374 pci_unmap_addr(cons_rx_buf, mapping));
2375
3fdfcc2c
MC
2376 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2377 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
2378 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2379 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
2380}
2381
85833c62
MC
2382static int
2383bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2384 dma_addr_t dma_addr, u32 ring_idx)
2385{
2386 int err;
2387 u16 prod = ring_idx & 0xffff;
2388
2389 err = bnx2_alloc_rx_skb(bp, prod);
2390 if (unlikely(err)) {
2391 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2392 return err;
2393 }
2394
2395 skb_reserve(skb, bp->rx_offset);
2396 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2397 PCI_DMA_FROMDEVICE);
2398
2399 skb_put(skb, len);
2400 return 0;
2401}
2402
c09c2627
MC
2403static inline u16
2404bnx2_get_hw_rx_cons(struct bnx2 *bp)
2405{
2406 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2407
2408 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2409 cons++;
2410 return cons;
2411}
2412
b6016b76
MC
2413static int
2414bnx2_rx_int(struct bnx2 *bp, int budget)
2415{
2416 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2417 struct l2_fhdr *rx_hdr;
2418 int rx_pkt = 0;
2419
c09c2627 2420 hw_cons = bnx2_get_hw_rx_cons(bp);
b6016b76
MC
2421 sw_cons = bp->rx_cons;
2422 sw_prod = bp->rx_prod;
2423
2424 /* Memory barrier necessary as speculative reads of the rx
2425 * buffer can be ahead of the index in the status block
2426 */
2427 rmb();
2428 while (sw_cons != hw_cons) {
2429 unsigned int len;
ade2bfe7 2430 u32 status;
b6016b76
MC
2431 struct sw_bd *rx_buf;
2432 struct sk_buff *skb;
236b6394 2433 dma_addr_t dma_addr;
b6016b76
MC
2434
2435 sw_ring_cons = RX_RING_IDX(sw_cons);
2436 sw_ring_prod = RX_RING_IDX(sw_prod);
2437
2438 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2439 skb = rx_buf->skb;
236b6394
MC
2440
2441 rx_buf->skb = NULL;
2442
2443 dma_addr = pci_unmap_addr(rx_buf, mapping);
2444
2445 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
b6016b76
MC
2446 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2447
2448 rx_hdr = (struct l2_fhdr *) skb->data;
2449 len = rx_hdr->l2_fhdr_pkt_len - 4;
2450
ade2bfe7 2451 if ((status = rx_hdr->l2_fhdr_status) &
b6016b76
MC
2452 (L2_FHDR_ERRORS_BAD_CRC |
2453 L2_FHDR_ERRORS_PHY_DECODE |
2454 L2_FHDR_ERRORS_ALIGNMENT |
2455 L2_FHDR_ERRORS_TOO_SHORT |
2456 L2_FHDR_ERRORS_GIANT_FRAME)) {
2457
85833c62
MC
2458 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2459 goto next_rx;
b6016b76
MC
2460 }
2461
2462 /* Since we don't have a jumbo ring, copy small packets
2463 * if mtu > 1500
2464 */
2465 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2466 struct sk_buff *new_skb;
2467
932f3772 2468 new_skb = netdev_alloc_skb(bp->dev, len + 2);
85833c62
MC
2469 if (new_skb == NULL) {
2470 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2471 sw_ring_prod);
2472 goto next_rx;
2473 }
b6016b76
MC
2474
2475 /* aligned copy */
d626f62b
ACM
2476 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2477 new_skb->data, len + 2);
b6016b76
MC
2478 skb_reserve(new_skb, 2);
2479 skb_put(new_skb, len);
b6016b76
MC
2480
2481 bnx2_reuse_rx_skb(bp, skb,
2482 sw_ring_cons, sw_ring_prod);
2483
2484 skb = new_skb;
85833c62
MC
2485 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2486 (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 2487 goto next_rx;
b6016b76
MC
2488
2489 skb->protocol = eth_type_trans(skb, bp->dev);
2490
2491 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 2492 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 2493
745720e5 2494 dev_kfree_skb(skb);
b6016b76
MC
2495 goto next_rx;
2496
2497 }
2498
b6016b76
MC
2499 skb->ip_summed = CHECKSUM_NONE;
2500 if (bp->rx_csum &&
2501 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2502 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2503
ade2bfe7
MC
2504 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2505 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
2506 skb->ip_summed = CHECKSUM_UNNECESSARY;
2507 }
2508
2509#ifdef BCM_VLAN
2510 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2511 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2512 rx_hdr->l2_fhdr_vlan_tag);
2513 }
2514 else
2515#endif
2516 netif_receive_skb(skb);
2517
2518 bp->dev->last_rx = jiffies;
2519 rx_pkt++;
2520
2521next_rx:
b6016b76
MC
2522 sw_cons = NEXT_RX_BD(sw_cons);
2523 sw_prod = NEXT_RX_BD(sw_prod);
2524
2525 if ((rx_pkt == budget))
2526 break;
f4e418f7
MC
2527
2528 /* Refresh hw_cons to see if there is new work */
2529 if (sw_cons == hw_cons) {
c09c2627 2530 hw_cons = bnx2_get_hw_rx_cons(bp);
f4e418f7
MC
2531 rmb();
2532 }
b6016b76
MC
2533 }
2534 bp->rx_cons = sw_cons;
2535 bp->rx_prod = sw_prod;
2536
2537 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2538
2539 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2540
2541 mmiowb();
2542
2543 return rx_pkt;
2544
2545}
2546
2547/* MSI ISR - The only difference between this and the INTx ISR
2548 * is that the MSI interrupt is always serviced.
2549 */
2550static irqreturn_t
7d12e780 2551bnx2_msi(int irq, void *dev_instance)
b6016b76
MC
2552{
2553 struct net_device *dev = dev_instance;
972ec0d4 2554 struct bnx2 *bp = netdev_priv(dev);
b6016b76 2555
c921e4c4 2556 prefetch(bp->status_blk);
b6016b76
MC
2557 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2558 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2559 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2560
2561 /* Return here if interrupt is disabled. */
73eef4cd
MC
2562 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2563 return IRQ_HANDLED;
b6016b76 2564
bea3348e 2565 netif_rx_schedule(dev, &bp->napi);
b6016b76 2566
73eef4cd 2567 return IRQ_HANDLED;
b6016b76
MC
2568}
2569
8e6a72c4
MC
2570static irqreturn_t
2571bnx2_msi_1shot(int irq, void *dev_instance)
2572{
2573 struct net_device *dev = dev_instance;
2574 struct bnx2 *bp = netdev_priv(dev);
2575
2576 prefetch(bp->status_blk);
2577
2578 /* Return here if interrupt is disabled. */
2579 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2580 return IRQ_HANDLED;
2581
bea3348e 2582 netif_rx_schedule(dev, &bp->napi);
8e6a72c4
MC
2583
2584 return IRQ_HANDLED;
2585}
2586
b6016b76 2587static irqreturn_t
7d12e780 2588bnx2_interrupt(int irq, void *dev_instance)
b6016b76
MC
2589{
2590 struct net_device *dev = dev_instance;
972ec0d4 2591 struct bnx2 *bp = netdev_priv(dev);
b8a7ce7b 2592 struct status_block *sblk = bp->status_blk;
b6016b76
MC
2593
2594 /* When using INTx, it is possible for the interrupt to arrive
2595 * at the CPU before the status block posted prior to the
2596 * interrupt. Reading a register will flush the status block.
2597 * When using MSI, the MSI message will always complete after
2598 * the status block write.
2599 */
b8a7ce7b 2600 if ((sblk->status_idx == bp->last_status_idx) &&
b6016b76
MC
2601 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2602 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 2603 return IRQ_NONE;
b6016b76
MC
2604
2605 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2606 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2607 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2608
b8a7ce7b
MC
2609 /* Read back to deassert IRQ immediately to avoid too many
2610 * spurious interrupts.
2611 */
2612 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2613
b6016b76 2614 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
2615 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2616 return IRQ_HANDLED;
b6016b76 2617
bea3348e 2618 if (netif_rx_schedule_prep(dev, &bp->napi)) {
b8a7ce7b 2619 bp->last_status_idx = sblk->status_idx;
bea3348e 2620 __netif_rx_schedule(dev, &bp->napi);
b8a7ce7b 2621 }
b6016b76 2622
73eef4cd 2623 return IRQ_HANDLED;
b6016b76
MC
2624}
2625
0d8a6571
MC
2626#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2627 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2628
f4e418f7
MC
2629static inline int
2630bnx2_has_work(struct bnx2 *bp)
2631{
2632 struct status_block *sblk = bp->status_blk;
2633
c09c2627 2634 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
f4e418f7
MC
2635 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2636 return 1;
2637
da3e4fbe
MC
2638 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2639 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2640 return 1;
2641
2642 return 0;
2643}
2644
6f535763 2645static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
b6016b76 2646{
da3e4fbe
MC
2647 struct status_block *sblk = bp->status_blk;
2648 u32 status_attn_bits = sblk->status_attn_bits;
2649 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 2650
da3e4fbe
MC
2651 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2652 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 2653
b6016b76 2654 bnx2_phy_int(bp);
bf5295bb
MC
2655
2656 /* This is needed to take care of transient status
2657 * during link changes.
2658 */
2659 REG_WR(bp, BNX2_HC_COMMAND,
2660 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2661 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
2662 }
2663
6dee6421 2664 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
b6016b76 2665 bnx2_tx_int(bp);
b6016b76 2666
c09c2627 2667 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
6f535763 2668 work_done += bnx2_rx_int(bp, budget - work_done);
6aa20a22 2669
6f535763
DM
2670 return work_done;
2671}
2672
2673static int bnx2_poll(struct napi_struct *napi, int budget)
2674{
2675 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2676 int work_done = 0;
6dee6421 2677 struct status_block *sblk = bp->status_blk;
6f535763
DM
2678
2679 while (1) {
2680 work_done = bnx2_poll_work(bp, work_done, budget);
f4e418f7 2681
6f535763
DM
2682 if (unlikely(work_done >= budget))
2683 break;
2684
6dee6421
MC
2685 /* bp->last_status_idx is used below to tell the hw how
2686 * much work has been processed, so we must read it before
2687 * checking for more work.
2688 */
2689 bp->last_status_idx = sblk->status_idx;
2690 rmb();
6f535763 2691 if (likely(!bnx2_has_work(bp))) {
6f535763
DM
2692 netif_rx_complete(bp->dev, napi);
2693 if (likely(bp->flags & USING_MSI_FLAG)) {
2694 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2695 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2696 bp->last_status_idx);
6dee6421 2697 break;
6f535763 2698 }
1269a8a6
MC
2699 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2700 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
6f535763 2701 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
1269a8a6 2702 bp->last_status_idx);
1269a8a6 2703
6f535763
DM
2704 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2705 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2706 bp->last_status_idx);
2707 break;
2708 }
b6016b76
MC
2709 }
2710
bea3348e 2711 return work_done;
b6016b76
MC
2712}
2713
932ff279 2714/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
2715 * from set_multicast.
2716 */
2717static void
2718bnx2_set_rx_mode(struct net_device *dev)
2719{
972ec0d4 2720 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
2721 u32 rx_mode, sort_mode;
2722 int i;
b6016b76 2723
c770a65c 2724 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
2725
2726 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2727 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2728 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2729#ifdef BCM_VLAN
e29054f9 2730 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
b6016b76 2731 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 2732#else
e29054f9
MC
2733 if (!(bp->flags & ASF_ENABLE_FLAG))
2734 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
2735#endif
2736 if (dev->flags & IFF_PROMISC) {
2737 /* Promiscuous mode. */
2738 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
2739 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2740 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
2741 }
2742 else if (dev->flags & IFF_ALLMULTI) {
2743 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2744 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2745 0xffffffff);
2746 }
2747 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2748 }
2749 else {
2750 /* Accept one or more multicast(s). */
2751 struct dev_mc_list *mclist;
2752 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2753 u32 regidx;
2754 u32 bit;
2755 u32 crc;
2756
2757 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2758
2759 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2760 i++, mclist = mclist->next) {
2761
2762 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2763 bit = crc & 0xff;
2764 regidx = (bit & 0xe0) >> 5;
2765 bit &= 0x1f;
2766 mc_filter[regidx] |= (1 << bit);
2767 }
2768
2769 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2770 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2771 mc_filter[i]);
2772 }
2773
2774 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2775 }
2776
2777 if (rx_mode != bp->rx_mode) {
2778 bp->rx_mode = rx_mode;
2779 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2780 }
2781
2782 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2783 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2784 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2785
c770a65c 2786 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
2787}
2788
2789static void
2790load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2791 u32 rv2p_proc)
2792{
2793 int i;
2794 u32 val;
2795
2796
2797 for (i = 0; i < rv2p_code_len; i += 8) {
fba9fe91 2798 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
b6016b76 2799 rv2p_code++;
fba9fe91 2800 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
b6016b76
MC
2801 rv2p_code++;
2802
2803 if (rv2p_proc == RV2P_PROC1) {
2804 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2805 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2806 }
2807 else {
2808 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2809 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2810 }
2811 }
2812
2813 /* Reset the processor, un-stall is done later. */
2814 if (rv2p_proc == RV2P_PROC1) {
2815 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2816 }
2817 else {
2818 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2819 }
2820}
2821
af3ee519 2822static int
b6016b76
MC
2823load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2824{
2825 u32 offset;
2826 u32 val;
af3ee519 2827 int rc;
b6016b76
MC
2828
2829 /* Halt the CPU. */
2830 val = REG_RD_IND(bp, cpu_reg->mode);
2831 val |= cpu_reg->mode_value_halt;
2832 REG_WR_IND(bp, cpu_reg->mode, val);
2833 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2834
2835 /* Load the Text area. */
2836 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519 2837 if (fw->gz_text) {
b6016b76
MC
2838 int j;
2839
ea1f8d5c
MC
2840 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2841 fw->gz_text_len);
2842 if (rc < 0)
b3448b0b 2843 return rc;
ea1f8d5c 2844
b6016b76 2845 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
ea1f8d5c 2846 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2847 }
2848 }
2849
2850 /* Load the Data area. */
2851 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2852 if (fw->data) {
2853 int j;
2854
2855 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2856 REG_WR_IND(bp, offset, fw->data[j]);
2857 }
2858 }
2859
2860 /* Load the SBSS area. */
2861 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
ea1f8d5c 2862 if (fw->sbss_len) {
b6016b76
MC
2863 int j;
2864
2865 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
ea1f8d5c 2866 REG_WR_IND(bp, offset, 0);
b6016b76
MC
2867 }
2868 }
2869
2870 /* Load the BSS area. */
2871 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
ea1f8d5c 2872 if (fw->bss_len) {
b6016b76
MC
2873 int j;
2874
2875 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
ea1f8d5c 2876 REG_WR_IND(bp, offset, 0);
b6016b76
MC
2877 }
2878 }
2879
2880 /* Load the Read-Only area. */
2881 offset = cpu_reg->spad_base +
2882 (fw->rodata_addr - cpu_reg->mips_view_base);
2883 if (fw->rodata) {
2884 int j;
2885
2886 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2887 REG_WR_IND(bp, offset, fw->rodata[j]);
2888 }
2889 }
2890
2891 /* Clear the pre-fetch instruction. */
2892 REG_WR_IND(bp, cpu_reg->inst, 0);
2893 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2894
2895 /* Start the CPU. */
2896 val = REG_RD_IND(bp, cpu_reg->mode);
2897 val &= ~cpu_reg->mode_value_halt;
2898 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2899 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2900
2901 return 0;
b6016b76
MC
2902}
2903
fba9fe91 2904static int
b6016b76
MC
2905bnx2_init_cpus(struct bnx2 *bp)
2906{
2907 struct cpu_reg cpu_reg;
af3ee519 2908 struct fw_info *fw;
b3448b0b 2909 int rc;
fba9fe91 2910 void *text;
b6016b76
MC
2911
2912 /* Initialize the RV2P processor. */
b3448b0b
DV
2913 text = vmalloc(FW_BUF_SIZE);
2914 if (!text)
2915 return -ENOMEM;
8336793b 2916 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
ea1f8d5c 2917 if (rc < 0)
fba9fe91 2918 goto init_cpu_err;
ea1f8d5c 2919
b3448b0b 2920 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 2921
8336793b 2922 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
ea1f8d5c 2923 if (rc < 0)
fba9fe91 2924 goto init_cpu_err;
ea1f8d5c 2925
b3448b0b 2926 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
2927
2928 /* Initialize the RX Processor. */
2929 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2930 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2931 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2932 cpu_reg.state = BNX2_RXP_CPU_STATE;
2933 cpu_reg.state_value_clear = 0xffffff;
2934 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2935 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2936 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2937 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2938 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2939 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2940 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2941
d43584c8
MC
2942 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2943 fw = &bnx2_rxp_fw_09;
2944 else
2945 fw = &bnx2_rxp_fw_06;
fba9fe91 2946
ea1f8d5c 2947 fw->text = text;
af3ee519 2948 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2949 if (rc)
2950 goto init_cpu_err;
2951
b6016b76
MC
2952 /* Initialize the TX Processor. */
2953 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2954 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2955 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2956 cpu_reg.state = BNX2_TXP_CPU_STATE;
2957 cpu_reg.state_value_clear = 0xffffff;
2958 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2959 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2960 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2961 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2962 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2963 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2964 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2965
d43584c8
MC
2966 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2967 fw = &bnx2_txp_fw_09;
2968 else
2969 fw = &bnx2_txp_fw_06;
fba9fe91 2970
ea1f8d5c 2971 fw->text = text;
af3ee519 2972 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2973 if (rc)
2974 goto init_cpu_err;
2975
b6016b76
MC
2976 /* Initialize the TX Patch-up Processor. */
2977 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2978 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2979 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2980 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2981 cpu_reg.state_value_clear = 0xffffff;
2982 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2983 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2984 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2985 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2986 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2987 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2988 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2989
d43584c8
MC
2990 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2991 fw = &bnx2_tpat_fw_09;
2992 else
2993 fw = &bnx2_tpat_fw_06;
fba9fe91 2994
ea1f8d5c 2995 fw->text = text;
af3ee519 2996 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2997 if (rc)
2998 goto init_cpu_err;
2999
b6016b76
MC
3000 /* Initialize the Completion Processor. */
3001 cpu_reg.mode = BNX2_COM_CPU_MODE;
3002 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3003 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3004 cpu_reg.state = BNX2_COM_CPU_STATE;
3005 cpu_reg.state_value_clear = 0xffffff;
3006 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3007 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3008 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3009 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3010 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3011 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3012 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3013
d43584c8
MC
3014 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015 fw = &bnx2_com_fw_09;
3016 else
3017 fw = &bnx2_com_fw_06;
fba9fe91 3018
ea1f8d5c 3019 fw->text = text;
af3ee519 3020 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3021 if (rc)
3022 goto init_cpu_err;
3023
d43584c8
MC
3024 /* Initialize the Command Processor. */
3025 cpu_reg.mode = BNX2_CP_CPU_MODE;
3026 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3027 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3028 cpu_reg.state = BNX2_CP_CPU_STATE;
3029 cpu_reg.state_value_clear = 0xffffff;
3030 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3031 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3032 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3033 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3034 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3035 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3036 cpu_reg.mips_view_base = 0x8000000;
b6016b76 3037
d43584c8
MC
3038 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3039 fw = &bnx2_cp_fw_09;
b6016b76 3040
ea1f8d5c 3041 fw->text = text;
6c1bbcc8 3042 rc = load_cpu_fw(bp, &cpu_reg, fw);
d43584c8
MC
3043 if (rc)
3044 goto init_cpu_err;
3045 }
fba9fe91 3046init_cpu_err:
ea1f8d5c 3047 vfree(text);
fba9fe91 3048 return rc;
b6016b76
MC
3049}
3050
3051static int
829ca9a3 3052bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
b6016b76
MC
3053{
3054 u16 pmcsr;
3055
3056 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3057
3058 switch (state) {
829ca9a3 3059 case PCI_D0: {
b6016b76
MC
3060 u32 val;
3061
3062 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3063 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3064 PCI_PM_CTRL_PME_STATUS);
3065
3066 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3067 /* delay required during transition out of D3hot */
3068 msleep(20);
3069
3070 val = REG_RD(bp, BNX2_EMAC_MODE);
3071 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3072 val &= ~BNX2_EMAC_MODE_MPKT;
3073 REG_WR(bp, BNX2_EMAC_MODE, val);
3074
3075 val = REG_RD(bp, BNX2_RPM_CONFIG);
3076 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3077 REG_WR(bp, BNX2_RPM_CONFIG, val);
3078 break;
3079 }
829ca9a3 3080 case PCI_D3hot: {
b6016b76
MC
3081 int i;
3082 u32 val, wol_msg;
3083
3084 if (bp->wol) {
3085 u32 advertising;
3086 u8 autoneg;
3087
3088 autoneg = bp->autoneg;
3089 advertising = bp->advertising;
3090
239cd343
MC
3091 if (bp->phy_port == PORT_TP) {
3092 bp->autoneg = AUTONEG_SPEED;
3093 bp->advertising = ADVERTISED_10baseT_Half |
3094 ADVERTISED_10baseT_Full |
3095 ADVERTISED_100baseT_Half |
3096 ADVERTISED_100baseT_Full |
3097 ADVERTISED_Autoneg;
3098 }
b6016b76 3099
239cd343
MC
3100 spin_lock_bh(&bp->phy_lock);
3101 bnx2_setup_phy(bp, bp->phy_port);
3102 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3103
3104 bp->autoneg = autoneg;
3105 bp->advertising = advertising;
3106
3107 bnx2_set_mac_addr(bp);
3108
3109 val = REG_RD(bp, BNX2_EMAC_MODE);
3110
3111 /* Enable port mode. */
3112 val &= ~BNX2_EMAC_MODE_PORT;
239cd343 3113 val |= BNX2_EMAC_MODE_MPKT_RCVD |
b6016b76 3114 BNX2_EMAC_MODE_ACPI_RCVD |
b6016b76 3115 BNX2_EMAC_MODE_MPKT;
239cd343
MC
3116 if (bp->phy_port == PORT_TP)
3117 val |= BNX2_EMAC_MODE_PORT_MII;
3118 else {
3119 val |= BNX2_EMAC_MODE_PORT_GMII;
3120 if (bp->line_speed == SPEED_2500)
3121 val |= BNX2_EMAC_MODE_25G_MODE;
3122 }
b6016b76
MC
3123
3124 REG_WR(bp, BNX2_EMAC_MODE, val);
3125
3126 /* receive all multicast */
3127 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3128 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3129 0xffffffff);
3130 }
3131 REG_WR(bp, BNX2_EMAC_RX_MODE,
3132 BNX2_EMAC_RX_MODE_SORT_MODE);
3133
3134 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3135 BNX2_RPM_SORT_USER0_MC_EN;
3136 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3137 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3138 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3139 BNX2_RPM_SORT_USER0_ENA);
3140
3141 /* Need to enable EMAC and RPM for WOL. */
3142 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3143 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3144 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3145 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3146
3147 val = REG_RD(bp, BNX2_RPM_CONFIG);
3148 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3149 REG_WR(bp, BNX2_RPM_CONFIG, val);
3150
3151 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3152 }
3153 else {
3154 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3155 }
3156
dda1e390
MC
3157 if (!(bp->flags & NO_WOL_FLAG))
3158 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
b6016b76
MC
3159
3160 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3161 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3162 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3163
3164 if (bp->wol)
3165 pmcsr |= 3;
3166 }
3167 else {
3168 pmcsr |= 3;
3169 }
3170 if (bp->wol) {
3171 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3172 }
3173 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3174 pmcsr);
3175
3176 /* No more memory access after this point until
3177 * device is brought back to D0.
3178 */
3179 udelay(50);
3180 break;
3181 }
3182 default:
3183 return -EINVAL;
3184 }
3185 return 0;
3186}
3187
3188static int
3189bnx2_acquire_nvram_lock(struct bnx2 *bp)
3190{
3191 u32 val;
3192 int j;
3193
3194 /* Request access to the flash interface. */
3195 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3196 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3197 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3198 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3199 break;
3200
3201 udelay(5);
3202 }
3203
3204 if (j >= NVRAM_TIMEOUT_COUNT)
3205 return -EBUSY;
3206
3207 return 0;
3208}
3209
3210static int
3211bnx2_release_nvram_lock(struct bnx2 *bp)
3212{
3213 int j;
3214 u32 val;
3215
3216 /* Relinquish nvram interface. */
3217 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3218
3219 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3220 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3221 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3222 break;
3223
3224 udelay(5);
3225 }
3226
3227 if (j >= NVRAM_TIMEOUT_COUNT)
3228 return -EBUSY;
3229
3230 return 0;
3231}
3232
3233
3234static int
3235bnx2_enable_nvram_write(struct bnx2 *bp)
3236{
3237 u32 val;
3238
3239 val = REG_RD(bp, BNX2_MISC_CFG);
3240 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3241
e30372c9 3242 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3243 int j;
3244
3245 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3246 REG_WR(bp, BNX2_NVM_COMMAND,
3247 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3248
3249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250 udelay(5);
3251
3252 val = REG_RD(bp, BNX2_NVM_COMMAND);
3253 if (val & BNX2_NVM_COMMAND_DONE)
3254 break;
3255 }
3256
3257 if (j >= NVRAM_TIMEOUT_COUNT)
3258 return -EBUSY;
3259 }
3260 return 0;
3261}
3262
3263static void
3264bnx2_disable_nvram_write(struct bnx2 *bp)
3265{
3266 u32 val;
3267
3268 val = REG_RD(bp, BNX2_MISC_CFG);
3269 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3270}
3271
3272
3273static void
3274bnx2_enable_nvram_access(struct bnx2 *bp)
3275{
3276 u32 val;
3277
3278 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3279 /* Enable both bits, even on read. */
6aa20a22 3280 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3281 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3282}
3283
3284static void
3285bnx2_disable_nvram_access(struct bnx2 *bp)
3286{
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Disable both bits, even after read. */
6aa20a22 3291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3292 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3293 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3294}
3295
3296static int
3297bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3298{
3299 u32 cmd;
3300 int j;
3301
e30372c9 3302 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3303 /* Buffered flash, no erase needed */
3304 return 0;
3305
3306 /* Build an erase command */
3307 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3308 BNX2_NVM_COMMAND_DOIT;
3309
3310 /* Need to clear DONE bit separately. */
3311 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3312
3313 /* Address of the NVRAM to read from. */
3314 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3315
3316 /* Issue an erase command. */
3317 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3318
3319 /* Wait for completion. */
3320 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3321 u32 val;
3322
3323 udelay(5);
3324
3325 val = REG_RD(bp, BNX2_NVM_COMMAND);
3326 if (val & BNX2_NVM_COMMAND_DONE)
3327 break;
3328 }
3329
3330 if (j >= NVRAM_TIMEOUT_COUNT)
3331 return -EBUSY;
3332
3333 return 0;
3334}
3335
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes,
 * big-endian on the wire, stored via memcpy so callers get raw flash
 * byte order).  @cmd_flags carries FIRST/LAST framing bits for
 * multi-word transactions.  Caller must already hold the NVRAM lock
 * and have access enabled.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709.
	 * Linear offsets are translated into page/column form for parts
	 * flagged BNX2_NV_TRANSLATE. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Data register is big-endian; convert before
			 * copying out as bytes. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3381
3382
/* Write one 32-bit word (@val, 4 raw bytes) to NVRAM at @offset.
 * @cmd_flags carries FIRST/LAST framing bits.  Caller must hold the
 * NVRAM lock with access and write both enabled.  Returns 0 on
 * success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Data register wants big-endian. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3426
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.  On 5709 the
 * part is fixed (flash_5709); on older chips the strap bits in
 * NVM_CFG1 are matched against flash_table, reprogramming the
 * interface if the straps have not yet been applied.  Returns 0 on
 * success, -ENODEV for an unrecognized part, or an NVRAM lock error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single known flash interface; skip strap probing. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall
	 * back to the table's total_size when unset. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3509
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 * The flash is addressed in 32-bit words, so an unaligned head, an
 * unaligned tail, and the aligned middle are each handled separately;
 * FIRST/LAST command flags frame the whole transaction.  Acquires and
 * releases the NVRAM lock internally.  Returns 0 or a negative errno.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned head: read the containing dword and copy the
	 * needed bytes. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* Entire request fits in this one dword. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a dword; 'extra' bytes are
	 * read but not copied out. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		/* Exactly one dword remains. */
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Aligned middle: straight dword reads. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword, trimming any 'extra' padding bytes. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3619
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned edges are widened to dword boundaries by reading back the
 * surrounding bytes into a bounce buffer.  For non-buffered flash each
 * page is read in full, erased, and rewritten (read-modify-write);
 * buffered flash is written directly.  The NVRAM lock is taken and
 * dropped around each page.  Returns 0 or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: pull in the preceding bytes of the dword. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pull in the trailing bytes of the last dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge caller data with the preserved edge bytes. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a whole-page scratch buffer (264 is
	 * the largest supported page size). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3799
0d8a6571
MC
/* Probe the firmware capability mailbox for remote-PHY support on
 * SerDes devices.  When present, sets REMOTE_PHY_CAP_FLAG, derives
 * phy_port (fibre vs TP) from the link status word, mirrors the
 * current link state into the netdev if it is running, and signs the
 * driver-ack mailbox so firmware knows we use the capability.
 * No-op for non-SerDes PHYs or firmware without the signature.
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Mailbox must carry the firmware capability signature. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		/* Firmware reports the media type via link status. */
		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Sync carrier state with firmware's view. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Acknowledge the remote-PHY capability. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   sig);
		}
	}
}
3839
b6016b76
MC
/* Soft-reset the chip.  Quiesces DMA, handshakes with firmware
 * (WAIT0 before, WAIT1 after), deposits the driver reset signature,
 * then issues the chip-specific reset: a MISC_COMMAND software reset
 * on 5709, or a core-reset request via PCICFG_MISC_CONFIG on older
 * parts.  Afterwards verifies endian configuration, re-probes the
 * remote PHY, and applies 5706 A0 workarounds.  @reset_code is the
 * firmware message qualifier.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; the read-back flushes
		 * the posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY; the media port may change across reset. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3941
/* Bring the chip from post-reset state to operational: program DMA
 * and byte-swap configuration, initialize contexts and on-chip CPUs,
 * set MAC address/MTU/backoff seed, point the host coalescing engine
 * at the status and statistics blocks, program all coalescing
 * parameters, install the RX filter, and finally tell firmware the
 * driver is up (WAIT2|RESET) before enabling all blocks.
 * Returns 0 or a negative errno from context/CPU init or fw_sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* One-wait-state read at 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to one DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* Disable PCI-X relaxed ordering. */
	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 errata workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Give the host coalescing engine the status/stats block DMA
	 * addresses. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing parameters: interrupt values in the high halfword. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to flush posting. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4119
59b47d8a
MC
4120static void
4121bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4122{
4123 u32 val, offset0, offset1, offset2, offset3;
4124
4125 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4126 offset0 = BNX2_L2CTX_TYPE_XI;
4127 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4128 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4129 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4130 } else {
4131 offset0 = BNX2_L2CTX_TYPE;
4132 offset1 = BNX2_L2CTX_CMD_TYPE;
4133 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4134 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4135 }
4136 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4137 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4138
4139 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4140 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4141
4142 val = (u64) bp->tx_desc_mapping >> 32;
4143 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4144
4145 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4146 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4147}
b6016b76
MC
4148
4149static void
4150bnx2_init_tx_ring(struct bnx2 *bp)
4151{
4152 struct tx_bd *txbd;
59b47d8a 4153 u32 cid;
b6016b76 4154
2f8af120
MC
4155 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4156
b6016b76 4157 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4158
b6016b76
MC
4159 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4160 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4161
4162 bp->tx_prod = 0;
4163 bp->tx_cons = 0;
f4e418f7 4164 bp->hw_tx_cons = 0;
b6016b76 4165 bp->tx_prod_bseq = 0;
6aa20a22 4166
59b47d8a
MC
4167 cid = TX_CID;
4168 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4169 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4170
59b47d8a 4171 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4172}
4173
/* Initialize the RX BD rings: compute buffer sizes from the MTU,
 * fill every descriptor, chain each ring page to the next (the last
 * chains back to page 0), program the RX context with the chain base
 * address, pre-fill the ring with rx skbs, and publish the initial
 * producer index and byte sequence to the hardware mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page points at the next page; the
		 * final page wraps back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Chain base address in the RX context. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring; stop early if allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish producer index and byte sequence to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4232
13daffa2
MC
4233static void
4234bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4235{
4236 u32 num_rings, max;
4237
4238 bp->rx_ring_size = size;
4239 num_rings = 1;
4240 while (size > MAX_RX_DESC_CNT) {
4241 size -= MAX_RX_DESC_CNT;
4242 num_rings++;
4243 }
4244 /* round to next power of 2 */
4245 max = MAX_RX_RINGS;
4246 while ((max & num_rings) == 0)
4247 max >>= 1;
4248
4249 if (num_rings != max)
4250 max <<= 1;
4251
4252 bp->rx_max_ring = max;
4253 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4254}
4255
b6016b76
MC
/* Release every skb still held in the TX ring: unmap the head DMA
 * mapping and each fragment page, then free the skb.  Fragment
 * mappings occupy the slots immediately after the head slot, so the
 * index advances by nr_frags + 1 per packet.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Empty slot: step one and keep scanning. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each fragment from the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				       pci_unmap_addr(tx_buf, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past the head slot and all fragment slots. */
		i += j + 1;
	}

}
4292
4293static void
4294bnx2_free_rx_skbs(struct bnx2 *bp)
4295{
4296 int i;
4297
4298 if (bp->rx_buf_ring == NULL)
4299 return;
4300
13daffa2 4301 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4302 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4303 struct sk_buff *skb = rx_buf->skb;
4304
05d0f1cf 4305 if (skb == NULL)
b6016b76
MC
4306 continue;
4307
4308 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4309 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4310
4311 rx_buf->skb = NULL;
4312
745720e5 4313 dev_kfree_skb(skb);
b6016b76
MC
4314 }
4315}
4316
/* Free all SKBs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4323
4324static int
4325bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4326{
4327 int rc;
4328
4329 rc = bnx2_reset_chip(bp, reset_code);
4330 bnx2_free_skbs(bp);
4331 if (rc)
4332 return rc;
4333
fba9fe91
MC
4334 if ((rc = bnx2_init_chip(bp)) != 0)
4335 return rc;
4336
b6016b76
MC
4337 bnx2_init_tx_ring(bp);
4338 bnx2_init_rx_ring(bp);
4339 return 0;
4340}
4341
4342static int
4343bnx2_init_nic(struct bnx2 *bp)
4344{
4345 int rc;
4346
4347 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4348 return rc;
4349
80be4434 4350 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4351 bnx2_init_phy(bp);
4352 bnx2_set_link(bp);
0d8a6571 4353 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4354 return 0;
4355}
4356
4357static int
4358bnx2_test_registers(struct bnx2 *bp)
4359{
4360 int ret;
5bae30c9 4361 int i, is_5709;
f71e1309 4362 static const struct {
b6016b76
MC
4363 u16 offset;
4364 u16 flags;
5bae30c9 4365#define BNX2_FL_NOT_5709 1
b6016b76
MC
4366 u32 rw_mask;
4367 u32 ro_mask;
4368 } reg_tbl[] = {
4369 { 0x006c, 0, 0x00000000, 0x0000003f },
4370 { 0x0090, 0, 0xffffffff, 0x00000000 },
4371 { 0x0094, 0, 0x00000000, 0x00000000 },
4372
5bae30c9
MC
4373 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4374 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4375 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4376 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4377 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4378 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4379 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4380 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4382
4383 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4384 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4385 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4386 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4387 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4388 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4389
4390 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4391 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4392 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4393
4394 { 0x1000, 0, 0x00000000, 0x00000001 },
4395 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4396
4397 { 0x1408, 0, 0x01c00800, 0x00000000 },
4398 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4399 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4400 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4401 { 0x14b0, 0, 0x00000002, 0x00000001 },
4402 { 0x14b8, 0, 0x00000000, 0x00000000 },
4403 { 0x14c0, 0, 0x00000000, 0x00000009 },
4404 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4405 { 0x14cc, 0, 0x00000000, 0x00000001 },
4406 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4407
4408 { 0x1800, 0, 0x00000000, 0x00000001 },
4409 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4410
4411 { 0x2800, 0, 0x00000000, 0x00000001 },
4412 { 0x2804, 0, 0x00000000, 0x00003f01 },
4413 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4414 { 0x2810, 0, 0xffff0000, 0x00000000 },
4415 { 0x2814, 0, 0xffff0000, 0x00000000 },
4416 { 0x2818, 0, 0xffff0000, 0x00000000 },
4417 { 0x281c, 0, 0xffff0000, 0x00000000 },
4418 { 0x2834, 0, 0xffffffff, 0x00000000 },
4419 { 0x2840, 0, 0x00000000, 0xffffffff },
4420 { 0x2844, 0, 0x00000000, 0xffffffff },
4421 { 0x2848, 0, 0xffffffff, 0x00000000 },
4422 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4423
4424 { 0x2c00, 0, 0x00000000, 0x00000011 },
4425 { 0x2c04, 0, 0x00000000, 0x00030007 },
4426
b6016b76
MC
4427 { 0x3c00, 0, 0x00000000, 0x00000001 },
4428 { 0x3c04, 0, 0x00000000, 0x00070000 },
4429 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4430 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4431 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4432 { 0x3c14, 0, 0x00000000, 0xffffffff },
4433 { 0x3c18, 0, 0x00000000, 0xffffffff },
4434 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4435 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4436
4437 { 0x5004, 0, 0x00000000, 0x0000007f },
4438 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4439
b6016b76
MC
4440 { 0x5c00, 0, 0x00000000, 0x00000001 },
4441 { 0x5c04, 0, 0x00000000, 0x0003000f },
4442 { 0x5c08, 0, 0x00000003, 0x00000000 },
4443 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4444 { 0x5c10, 0, 0x00000000, 0xffffffff },
4445 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4446 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4447 { 0x5c88, 0, 0x00000000, 0x00077373 },
4448 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4449
4450 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4451 { 0x680c, 0, 0xffffffff, 0x00000000 },
4452 { 0x6810, 0, 0xffffffff, 0x00000000 },
4453 { 0x6814, 0, 0xffffffff, 0x00000000 },
4454 { 0x6818, 0, 0xffffffff, 0x00000000 },
4455 { 0x681c, 0, 0xffffffff, 0x00000000 },
4456 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4457 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4458 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4459 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4461 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4462 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4463 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4464 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4465 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4466 { 0x684c, 0, 0xffffffff, 0x00000000 },
4467 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4469 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4470 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4471 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4472 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4473
4474 { 0xffff, 0, 0x00000000, 0x00000000 },
4475 };
4476
4477 ret = 0;
5bae30c9
MC
4478 is_5709 = 0;
4479 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4480 is_5709 = 1;
4481
b6016b76
MC
4482 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4483 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4484 u16 flags = reg_tbl[i].flags;
4485
4486 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4487 continue;
b6016b76
MC
4488
4489 offset = (u32) reg_tbl[i].offset;
4490 rw_mask = reg_tbl[i].rw_mask;
4491 ro_mask = reg_tbl[i].ro_mask;
4492
14ab9b86 4493 save_val = readl(bp->regview + offset);
b6016b76 4494
14ab9b86 4495 writel(0, bp->regview + offset);
b6016b76 4496
14ab9b86 4497 val = readl(bp->regview + offset);
b6016b76
MC
4498 if ((val & rw_mask) != 0) {
4499 goto reg_test_err;
4500 }
4501
4502 if ((val & ro_mask) != (save_val & ro_mask)) {
4503 goto reg_test_err;
4504 }
4505
14ab9b86 4506 writel(0xffffffff, bp->regview + offset);
b6016b76 4507
14ab9b86 4508 val = readl(bp->regview + offset);
b6016b76
MC
4509 if ((val & rw_mask) != rw_mask) {
4510 goto reg_test_err;
4511 }
4512
4513 if ((val & ro_mask) != (save_val & ro_mask)) {
4514 goto reg_test_err;
4515 }
4516
14ab9b86 4517 writel(save_val, bp->regview + offset);
b6016b76
MC
4518 continue;
4519
4520reg_test_err:
14ab9b86 4521 writel(save_val, bp->regview + offset);
b6016b76
MC
4522 ret = -ENODEV;
4523 break;
4524 }
4525 return ret;
4526}
4527
4528static int
4529bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4530{
f71e1309 4531 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4532 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4533 int i;
4534
4535 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4536 u32 offset;
4537
4538 for (offset = 0; offset < size; offset += 4) {
4539
4540 REG_WR_IND(bp, start + offset, test_pattern[i]);
4541
4542 if (REG_RD_IND(bp, start + offset) !=
4543 test_pattern[i]) {
4544 return -ENODEV;
4545 }
4546 }
4547 }
4548 return 0;
4549}
4550
4551static int
4552bnx2_test_memory(struct bnx2 *bp)
4553{
4554 int ret = 0;
4555 int i;
5bae30c9 4556 static struct mem_entry {
b6016b76
MC
4557 u32 offset;
4558 u32 len;
5bae30c9 4559 } mem_tbl_5706[] = {
b6016b76 4560 { 0x60000, 0x4000 },
5b0c76ad 4561 { 0xa0000, 0x3000 },
b6016b76
MC
4562 { 0xe0000, 0x4000 },
4563 { 0x120000, 0x4000 },
4564 { 0x1a0000, 0x4000 },
4565 { 0x160000, 0x4000 },
4566 { 0xffffffff, 0 },
5bae30c9
MC
4567 },
4568 mem_tbl_5709[] = {
4569 { 0x60000, 0x4000 },
4570 { 0xa0000, 0x3000 },
4571 { 0xe0000, 0x4000 },
4572 { 0x120000, 0x4000 },
4573 { 0x1a0000, 0x4000 },
4574 { 0xffffffff, 0 },
b6016b76 4575 };
5bae30c9
MC
4576 struct mem_entry *mem_tbl;
4577
4578 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4579 mem_tbl = mem_tbl_5709;
4580 else
4581 mem_tbl = mem_tbl_5706;
b6016b76
MC
4582
4583 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4584 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4585 mem_tbl[i].len)) != 0) {
4586 return ret;
4587 }
4588 }
6aa20a22 4589
b6016b76
MC
4590 return ret;
4591}
4592
bc5a0690
MC
4593#define BNX2_MAC_LOOPBACK 0
4594#define BNX2_PHY_LOOPBACK 1
4595
b6016b76 4596static int
bc5a0690 4597bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4598{
4599 unsigned int pkt_size, num_pkts, i;
4600 struct sk_buff *skb, *rx_skb;
4601 unsigned char *packet;
bc5a0690 4602 u16 rx_start_idx, rx_idx;
b6016b76
MC
4603 dma_addr_t map;
4604 struct tx_bd *txbd;
4605 struct sw_bd *rx_buf;
4606 struct l2_fhdr *rx_hdr;
4607 int ret = -ENODEV;
4608
bc5a0690
MC
4609 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4610 bp->loopback = MAC_LOOPBACK;
4611 bnx2_set_mac_loopback(bp);
4612 }
4613 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4614 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4615 return 0;
4616
80be4434 4617 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4618 bnx2_set_phy_loopback(bp);
4619 }
4620 else
4621 return -EINVAL;
b6016b76
MC
4622
4623 pkt_size = 1514;
932f3772 4624 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4625 if (!skb)
4626 return -ENOMEM;
b6016b76 4627 packet = skb_put(skb, pkt_size);
6634292b 4628 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4629 memset(packet + 6, 0x0, 8);
4630 for (i = 14; i < pkt_size; i++)
4631 packet[i] = (unsigned char) (i & 0xff);
4632
4633 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4634 PCI_DMA_TODEVICE);
4635
bf5295bb
MC
4636 REG_WR(bp, BNX2_HC_COMMAND,
4637 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4638
b6016b76
MC
4639 REG_RD(bp, BNX2_HC_COMMAND);
4640
4641 udelay(5);
4642 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4643
b6016b76
MC
4644 num_pkts = 0;
4645
bc5a0690 4646 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4647
4648 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4649 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4650 txbd->tx_bd_mss_nbytes = pkt_size;
4651 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4652
4653 num_pkts++;
bc5a0690
MC
4654 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4655 bp->tx_prod_bseq += pkt_size;
b6016b76 4656
234754d5
MC
4657 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4658 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4659
4660 udelay(100);
4661
bf5295bb
MC
4662 REG_WR(bp, BNX2_HC_COMMAND,
4663 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4664
b6016b76
MC
4665 REG_RD(bp, BNX2_HC_COMMAND);
4666
4667 udelay(5);
4668
4669 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4670 dev_kfree_skb(skb);
b6016b76 4671
bc5a0690 4672 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
4673 goto loopback_test_done;
4674 }
4675
4676 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4677 if (rx_idx != rx_start_idx + num_pkts) {
4678 goto loopback_test_done;
4679 }
4680
4681 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4682 rx_skb = rx_buf->skb;
4683
4684 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4685 skb_reserve(rx_skb, bp->rx_offset);
4686
4687 pci_dma_sync_single_for_cpu(bp->pdev,
4688 pci_unmap_addr(rx_buf, mapping),
4689 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4690
ade2bfe7 4691 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4692 (L2_FHDR_ERRORS_BAD_CRC |
4693 L2_FHDR_ERRORS_PHY_DECODE |
4694 L2_FHDR_ERRORS_ALIGNMENT |
4695 L2_FHDR_ERRORS_TOO_SHORT |
4696 L2_FHDR_ERRORS_GIANT_FRAME)) {
4697
4698 goto loopback_test_done;
4699 }
4700
4701 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4702 goto loopback_test_done;
4703 }
4704
4705 for (i = 14; i < pkt_size; i++) {
4706 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4707 goto loopback_test_done;
4708 }
4709 }
4710
4711 ret = 0;
4712
4713loopback_test_done:
4714 bp->loopback = 0;
4715 return ret;
4716}
4717
bc5a0690
MC
4718#define BNX2_MAC_LOOPBACK_FAILED 1
4719#define BNX2_PHY_LOOPBACK_FAILED 2
4720#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4721 BNX2_PHY_LOOPBACK_FAILED)
4722
4723static int
4724bnx2_test_loopback(struct bnx2 *bp)
4725{
4726 int rc = 0;
4727
4728 if (!netif_running(bp->dev))
4729 return BNX2_LOOPBACK_FAILED;
4730
4731 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4732 spin_lock_bh(&bp->phy_lock);
4733 bnx2_init_phy(bp);
4734 spin_unlock_bh(&bp->phy_lock);
4735 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4736 rc |= BNX2_MAC_LOOPBACK_FAILED;
4737 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4738 rc |= BNX2_PHY_LOOPBACK_FAILED;
4739 return rc;
4740}
4741
b6016b76
MC
4742#define NVRAM_SIZE 0x200
4743#define CRC32_RESIDUAL 0xdebb20e3
4744
4745static int
4746bnx2_test_nvram(struct bnx2 *bp)
4747{
4748 u32 buf[NVRAM_SIZE / 4];
4749 u8 *data = (u8 *) buf;
4750 int rc = 0;
4751 u32 magic, csum;
4752
4753 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4754 goto test_nvram_done;
4755
4756 magic = be32_to_cpu(buf[0]);
4757 if (magic != 0x669955aa) {
4758 rc = -ENODEV;
4759 goto test_nvram_done;
4760 }
4761
4762 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4763 goto test_nvram_done;
4764
4765 csum = ether_crc_le(0x100, data);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 goto test_nvram_done;
4769 }
4770
4771 csum = ether_crc_le(0x100, data + 0x100);
4772 if (csum != CRC32_RESIDUAL) {
4773 rc = -ENODEV;
4774 }
4775
4776test_nvram_done:
4777 return rc;
4778}
4779
4780static int
4781bnx2_test_link(struct bnx2 *bp)
4782{
4783 u32 bmsr;
4784
489310a4
MC
4785 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4786 if (bp->link_up)
4787 return 0;
4788 return -ENODEV;
4789 }
c770a65c 4790 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
4791 bnx2_enable_bmsr1(bp);
4792 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4794 bnx2_disable_bmsr1(bp);
c770a65c 4795 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4796
b6016b76
MC
4797 if (bmsr & BMSR_LSTATUS) {
4798 return 0;
4799 }
4800 return -ENODEV;
4801}
4802
4803static int
4804bnx2_test_intr(struct bnx2 *bp)
4805{
4806 int i;
b6016b76
MC
4807 u16 status_idx;
4808
4809 if (!netif_running(bp->dev))
4810 return -ENODEV;
4811
4812 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4813
4814 /* This register is not touched during run-time. */
bf5295bb 4815 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4816 REG_RD(bp, BNX2_HC_COMMAND);
4817
4818 for (i = 0; i < 10; i++) {
4819 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4820 status_idx) {
4821
4822 break;
4823 }
4824
4825 msleep_interruptible(10);
4826 }
4827 if (i < 10)
4828 return 0;
4829
4830 return -ENODEV;
4831}
4832
4833static void
48b01e2d 4834bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 4835{
48b01e2d
MC
4836 spin_lock(&bp->phy_lock);
4837 if (bp->serdes_an_pending)
4838 bp->serdes_an_pending--;
4839 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4840 u32 bmcr;
b6016b76 4841
48b01e2d 4842 bp->current_interval = bp->timer_interval;
cd339a0e 4843
ca58c3af 4844 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 4845
48b01e2d
MC
4846 if (bmcr & BMCR_ANENABLE) {
4847 u32 phy1, phy2;
b6016b76 4848
48b01e2d
MC
4849 bnx2_write_phy(bp, 0x1c, 0x7c00);
4850 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 4851
48b01e2d
MC
4852 bnx2_write_phy(bp, 0x17, 0x0f01);
4853 bnx2_read_phy(bp, 0x15, &phy2);
4854 bnx2_write_phy(bp, 0x17, 0x0f01);
4855 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 4856
48b01e2d
MC
4857 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4858 !(phy2 & 0x20)) { /* no CONFIG */
4859
4860 bmcr &= ~BMCR_ANENABLE;
4861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 4862 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
4863 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4864 }
b6016b76 4865 }
48b01e2d
MC
4866 }
4867 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4868 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4869 u32 phy2;
b6016b76 4870
48b01e2d
MC
4871 bnx2_write_phy(bp, 0x17, 0x0f01);
4872 bnx2_read_phy(bp, 0x15, &phy2);
4873 if (phy2 & 0x20) {
4874 u32 bmcr;
cd339a0e 4875
ca58c3af 4876 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 4877 bmcr |= BMCR_ANENABLE;
ca58c3af 4878 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 4879
48b01e2d
MC
4880 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4881 }
4882 } else
4883 bp->current_interval = bp->timer_interval;
b6016b76 4884
48b01e2d
MC
4885 spin_unlock(&bp->phy_lock);
4886}
b6016b76 4887
f8dd064e
MC
4888static void
4889bnx2_5708_serdes_timer(struct bnx2 *bp)
4890{
0d8a6571
MC
4891 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4892 return;
4893
f8dd064e
MC
4894 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4895 bp->serdes_an_pending = 0;
4896 return;
4897 }
b6016b76 4898
f8dd064e
MC
4899 spin_lock(&bp->phy_lock);
4900 if (bp->serdes_an_pending)
4901 bp->serdes_an_pending--;
4902 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4903 u32 bmcr;
b6016b76 4904
ca58c3af 4905 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 4906 if (bmcr & BMCR_ANENABLE) {
605a9e20 4907 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
4908 bp->current_interval = SERDES_FORCED_TIMEOUT;
4909 } else {
605a9e20 4910 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
4911 bp->serdes_an_pending = 2;
4912 bp->current_interval = bp->timer_interval;
b6016b76 4913 }
b6016b76 4914
f8dd064e
MC
4915 } else
4916 bp->current_interval = bp->timer_interval;
b6016b76 4917
f8dd064e
MC
4918 spin_unlock(&bp->phy_lock);
4919}
4920
48b01e2d
MC
4921static void
4922bnx2_timer(unsigned long data)
4923{
4924 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 4925
48b01e2d
MC
4926 if (!netif_running(bp->dev))
4927 return;
b6016b76 4928
48b01e2d
MC
4929 if (atomic_read(&bp->intr_sem) != 0)
4930 goto bnx2_restart_timer;
b6016b76 4931
df149d70 4932 bnx2_send_heart_beat(bp);
b6016b76 4933
48b01e2d 4934 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 4935
02537b06
MC
4936 /* workaround occasional corrupted counters */
4937 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4938 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4939 BNX2_HC_COMMAND_STATS_NOW);
4940
f8dd064e
MC
4941 if (bp->phy_flags & PHY_SERDES_FLAG) {
4942 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4943 bnx2_5706_serdes_timer(bp);
27a005b8 4944 else
f8dd064e 4945 bnx2_5708_serdes_timer(bp);
b6016b76
MC
4946 }
4947
4948bnx2_restart_timer:
cd339a0e 4949 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4950}
4951
8e6a72c4
MC
4952static int
4953bnx2_request_irq(struct bnx2 *bp)
4954{
4955 struct net_device *dev = bp->dev;
4956 int rc = 0;
4957
4958 if (bp->flags & USING_MSI_FLAG) {
4959 irq_handler_t fn = bnx2_msi;
4960
4961 if (bp->flags & ONE_SHOT_MSI_FLAG)
4962 fn = bnx2_msi_1shot;
4963
4964 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4965 } else
4966 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4967 IRQF_SHARED, dev->name, dev);
4968 return rc;
4969}
4970
4971static void
4972bnx2_free_irq(struct bnx2 *bp)
4973{
4974 struct net_device *dev = bp->dev;
4975
4976 if (bp->flags & USING_MSI_FLAG) {
4977 free_irq(bp->pdev->irq, dev);
4978 pci_disable_msi(bp->pdev);
4979 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4980 } else
4981 free_irq(bp->pdev->irq, dev);
4982}
4983
b6016b76
MC
4984/* Called with rtnl_lock */
4985static int
4986bnx2_open(struct net_device *dev)
4987{
972ec0d4 4988 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4989 int rc;
4990
1b2f922f
MC
4991 netif_carrier_off(dev);
4992
829ca9a3 4993 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
4994 bnx2_disable_int(bp);
4995
4996 rc = bnx2_alloc_mem(bp);
4997 if (rc)
4998 return rc;
4999
bea3348e
SH
5000 napi_enable(&bp->napi);
5001
8e6a72c4 5002 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
b6016b76
MC
5003 if (pci_enable_msi(bp->pdev) == 0) {
5004 bp->flags |= USING_MSI_FLAG;
8e6a72c4
MC
5005 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5006 bp->flags |= ONE_SHOT_MSI_FLAG;
b6016b76 5007 }
b6016b76 5008 }
8e6a72c4
MC
5009 rc = bnx2_request_irq(bp);
5010
b6016b76 5011 if (rc) {
bea3348e 5012 napi_disable(&bp->napi);
b6016b76
MC
5013 bnx2_free_mem(bp);
5014 return rc;
5015 }
5016
5017 rc = bnx2_init_nic(bp);
5018
5019 if (rc) {
bea3348e 5020 napi_disable(&bp->napi);
8e6a72c4 5021 bnx2_free_irq(bp);
b6016b76
MC
5022 bnx2_free_skbs(bp);
5023 bnx2_free_mem(bp);
5024 return rc;
5025 }
6aa20a22 5026
cd339a0e 5027 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5028
5029 atomic_set(&bp->intr_sem, 0);
5030
5031 bnx2_enable_int(bp);
5032
5033 if (bp->flags & USING_MSI_FLAG) {
5034 /* Test MSI to make sure it is working
5035 * If MSI test fails, go back to INTx mode
5036 */
5037 if (bnx2_test_intr(bp) != 0) {
5038 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5039 " using MSI, switching to INTx mode. Please"
5040 " report this failure to the PCI maintainer"
5041 " and include system chipset information.\n",
5042 bp->dev->name);
5043
5044 bnx2_disable_int(bp);
8e6a72c4 5045 bnx2_free_irq(bp);
b6016b76
MC
5046
5047 rc = bnx2_init_nic(bp);
5048
8e6a72c4
MC
5049 if (!rc)
5050 rc = bnx2_request_irq(bp);
5051
b6016b76 5052 if (rc) {
bea3348e 5053 napi_disable(&bp->napi);
b6016b76
MC
5054 bnx2_free_skbs(bp);
5055 bnx2_free_mem(bp);
5056 del_timer_sync(&bp->timer);
5057 return rc;
5058 }
5059 bnx2_enable_int(bp);
5060 }
5061 }
5062 if (bp->flags & USING_MSI_FLAG) {
5063 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5064 }
5065
5066 netif_start_queue(dev);
5067
5068 return 0;
5069}
5070
5071static void
c4028958 5072bnx2_reset_task(struct work_struct *work)
b6016b76 5073{
c4028958 5074 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5075
afdc08b9
MC
5076 if (!netif_running(bp->dev))
5077 return;
5078
5079 bp->in_reset_task = 1;
b6016b76
MC
5080 bnx2_netif_stop(bp);
5081
5082 bnx2_init_nic(bp);
5083
5084 atomic_set(&bp->intr_sem, 1);
5085 bnx2_netif_start(bp);
afdc08b9 5086 bp->in_reset_task = 0;
b6016b76
MC
5087}
5088
5089static void
5090bnx2_tx_timeout(struct net_device *dev)
5091{
972ec0d4 5092 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5093
5094 /* This allows the netif to be shutdown gracefully before resetting */
5095 schedule_work(&bp->reset_task);
5096}
5097
5098#ifdef BCM_VLAN
5099/* Called with rtnl_lock */
5100static void
5101bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5102{
972ec0d4 5103 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5104
5105 bnx2_netif_stop(bp);
5106
5107 bp->vlgrp = vlgrp;
5108 bnx2_set_rx_mode(dev);
5109
5110 bnx2_netif_start(bp);
5111}
b6016b76
MC
5112#endif
5113
932ff279 5114/* Called with netif_tx_lock.
2f8af120
MC
5115 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5116 * netif_wake_queue().
b6016b76
MC
5117 */
5118static int
5119bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5120{
972ec0d4 5121 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5122 dma_addr_t mapping;
5123 struct tx_bd *txbd;
5124 struct sw_bd *tx_buf;
5125 u32 len, vlan_tag_flags, last_frag, mss;
5126 u16 prod, ring_prod;
5127 int i;
5128
e89bbf10 5129 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
5130 netif_stop_queue(dev);
5131 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5132 dev->name);
5133
5134 return NETDEV_TX_BUSY;
5135 }
5136 len = skb_headlen(skb);
5137 prod = bp->tx_prod;
5138 ring_prod = TX_RING_IDX(prod);
5139
5140 vlan_tag_flags = 0;
84fa7933 5141 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
5142 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5143 }
5144
5145 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5146 vlan_tag_flags |=
5147 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5148 }
fde82055 5149 if ((mss = skb_shinfo(skb)->gso_size)) {
b6016b76 5150 u32 tcp_opt_len, ip_tcp_len;
eddc9ec5 5151 struct iphdr *iph;
b6016b76 5152
b6016b76
MC
5153 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5154
4666f87a
MC
5155 tcp_opt_len = tcp_optlen(skb);
5156
5157 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5158 u32 tcp_off = skb_transport_offset(skb) -
5159 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 5160
4666f87a
MC
5161 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5162 TX_BD_FLAGS_SW_FLAGS;
5163 if (likely(tcp_off == 0))
5164 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5165 else {
5166 tcp_off >>= 3;
5167 vlan_tag_flags |= ((tcp_off & 0x3) <<
5168 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5169 ((tcp_off & 0x10) <<
5170 TX_BD_FLAGS_TCP6_OFF4_SHL);
5171 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5172 }
5173 } else {
5174 if (skb_header_cloned(skb) &&
5175 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5176 dev_kfree_skb(skb);
5177 return NETDEV_TX_OK;
5178 }
b6016b76 5179
4666f87a
MC
5180 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5181
5182 iph = ip_hdr(skb);
5183 iph->check = 0;
5184 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5185 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5186 iph->daddr, 0,
5187 IPPROTO_TCP,
5188 0);
5189 if (tcp_opt_len || (iph->ihl > 5)) {
5190 vlan_tag_flags |= ((iph->ihl - 5) +
5191 (tcp_opt_len >> 2)) << 8;
5192 }
b6016b76 5193 }
4666f87a 5194 } else
b6016b76 5195 mss = 0;
b6016b76
MC
5196
5197 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 5198
b6016b76
MC
5199 tx_buf = &bp->tx_buf_ring[ring_prod];
5200 tx_buf->skb = skb;
5201 pci_unmap_addr_set(tx_buf, mapping, mapping);
5202
5203 txbd = &bp->tx_desc_ring[ring_prod];
5204
5205 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5206 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5207 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5208 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5209
5210 last_frag = skb_shinfo(skb)->nr_frags;
5211
5212 for (i = 0; i < last_frag; i++) {
5213 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5214
5215 prod = NEXT_TX_BD(prod);
5216 ring_prod = TX_RING_IDX(prod);
5217 txbd = &bp->tx_desc_ring[ring_prod];
5218
5219 len = frag->size;
5220 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5221 len, PCI_DMA_TODEVICE);
5222 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5223 mapping, mapping);
5224
5225 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5226 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5227 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5228 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5229
5230 }
5231 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5232
5233 prod = NEXT_TX_BD(prod);
5234 bp->tx_prod_bseq += skb->len;
5235
234754d5
MC
5236 REG_WR16(bp, bp->tx_bidx_addr, prod);
5237 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
5238
5239 mmiowb();
5240
5241 bp->tx_prod = prod;
5242 dev->trans_start = jiffies;
5243
e89bbf10 5244 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 5245 netif_stop_queue(dev);
2f8af120 5246 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 5247 netif_wake_queue(dev);
b6016b76
MC
5248 }
5249
5250 return NETDEV_TX_OK;
5251}
5252
5253/* Called with rtnl_lock */
5254static int
5255bnx2_close(struct net_device *dev)
5256{
972ec0d4 5257 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5258 u32 reset_code;
5259
afdc08b9
MC
5260 /* Calling flush_scheduled_work() may deadlock because
5261 * linkwatch_event() may be on the workqueue and it will try to get
5262 * the rtnl_lock which we are holding.
5263 */
5264 while (bp->in_reset_task)
5265 msleep(1);
5266
bea3348e
SH
5267 bnx2_disable_int_sync(bp);
5268 napi_disable(&bp->napi);
b6016b76 5269 del_timer_sync(&bp->timer);
dda1e390 5270 if (bp->flags & NO_WOL_FLAG)
6c4f095e 5271 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 5272 else if (bp->wol)
b6016b76
MC
5273 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5274 else
5275 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5276 bnx2_reset_chip(bp, reset_code);
8e6a72c4 5277 bnx2_free_irq(bp);
b6016b76
MC
5278 bnx2_free_skbs(bp);
5279 bnx2_free_mem(bp);
5280 bp->link_up = 0;
5281 netif_carrier_off(bp->dev);
829ca9a3 5282 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
5283 return 0;
5284}
5285
5286#define GET_NET_STATS64(ctr) \
5287 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5288 (unsigned long) (ctr##_lo)
5289
5290#define GET_NET_STATS32(ctr) \
5291 (ctr##_lo)
5292
5293#if (BITS_PER_LONG == 64)
5294#define GET_NET_STATS GET_NET_STATS64
5295#else
5296#define GET_NET_STATS GET_NET_STATS32
5297#endif
5298
5299static struct net_device_stats *
5300bnx2_get_stats(struct net_device *dev)
5301{
972ec0d4 5302 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5303 struct statistics_block *stats_blk = bp->stats_blk;
5304 struct net_device_stats *net_stats = &bp->net_stats;
5305
5306 if (bp->stats_blk == NULL) {
5307 return net_stats;
5308 }
5309 net_stats->rx_packets =
5310 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5311 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5312 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5313
5314 net_stats->tx_packets =
5315 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5316 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5317 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5318
5319 net_stats->rx_bytes =
5320 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5321
5322 net_stats->tx_bytes =
5323 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5324
6aa20a22 5325 net_stats->multicast =
b6016b76
MC
5326 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5327
6aa20a22 5328 net_stats->collisions =
b6016b76
MC
5329 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5330
6aa20a22 5331 net_stats->rx_length_errors =
b6016b76
MC
5332 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5333 stats_blk->stat_EtherStatsOverrsizePkts);
5334
6aa20a22 5335 net_stats->rx_over_errors =
b6016b76
MC
5336 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5337
6aa20a22 5338 net_stats->rx_frame_errors =
b6016b76
MC
5339 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5340
6aa20a22 5341 net_stats->rx_crc_errors =
b6016b76
MC
5342 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5343
5344 net_stats->rx_errors = net_stats->rx_length_errors +
5345 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5346 net_stats->rx_crc_errors;
5347
5348 net_stats->tx_aborted_errors =
5349 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5350 stats_blk->stat_Dot3StatsLateCollisions);
5351
5b0c76ad
MC
5352 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5353 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
5354 net_stats->tx_carrier_errors = 0;
5355 else {
5356 net_stats->tx_carrier_errors =
5357 (unsigned long)
5358 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5359 }
5360
5361 net_stats->tx_errors =
6aa20a22 5362 (unsigned long)
b6016b76
MC
5363 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5364 +
5365 net_stats->tx_aborted_errors +
5366 net_stats->tx_carrier_errors;
5367
cea94db9
MC
5368 net_stats->rx_missed_errors =
5369 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5370 stats_blk->stat_FwRxDrop);
5371
b6016b76
MC
5372 return net_stats;
5373}
5374
5375/* All ethtool functions called with rtnl_lock */
5376
/* ethtool get_settings: report supported/advertised modes, current speed
 * and duplex.  Called with rtnl_lock held; takes phy_lock to read link
 * state consistently.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* With a remote-PHY capable firmware both media types may be
	 * switched at runtime, so advertise both.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		/* 2.5G is only available on 2.5G-capable SerDes PHYs. */
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5435
b6016b76
MC
/* ethtool set_settings: validate the requested port/autoneg/speed/duplex
 * combination, update the requested-link parameters in *bp, and kick the
 * PHY.  Returns -EINVAL for any unsupported combination.  Called with
 * rtnl_lock held; phy_lock protects the bp link fields.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing the media port is only possible with remote-PHY
	 * capable firmware.
	 */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single mode selected: advertise everything
			 * the chosen media supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			/* Fibre is full duplex only, at 1G or 2.5G. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Forced gigabit speeds are not supported on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5518
5519static void
5520bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5521{
972ec0d4 5522 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5523
5524 strcpy(info->driver, DRV_MODULE_NAME);
5525 strcpy(info->version, DRV_MODULE_VERSION);
5526 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 5527 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
5528}
5529
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5537
5538static void
5539bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5540{
5541 u32 *p = _p, i, offset;
5542 u8 *orig_p = _p;
5543 struct bnx2 *bp = netdev_priv(dev);
5544 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5545 0x0800, 0x0880, 0x0c00, 0x0c10,
5546 0x0c30, 0x0d08, 0x1000, 0x101c,
5547 0x1040, 0x1048, 0x1080, 0x10a4,
5548 0x1400, 0x1490, 0x1498, 0x14f0,
5549 0x1500, 0x155c, 0x1580, 0x15dc,
5550 0x1600, 0x1658, 0x1680, 0x16d8,
5551 0x1800, 0x1820, 0x1840, 0x1854,
5552 0x1880, 0x1894, 0x1900, 0x1984,
5553 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5554 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5555 0x2000, 0x2030, 0x23c0, 0x2400,
5556 0x2800, 0x2820, 0x2830, 0x2850,
5557 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5558 0x3c00, 0x3c94, 0x4000, 0x4010,
5559 0x4080, 0x4090, 0x43c0, 0x4458,
5560 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5561 0x4fc0, 0x5010, 0x53c0, 0x5444,
5562 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5563 0x5fc0, 0x6000, 0x6400, 0x6428,
5564 0x6800, 0x6848, 0x684c, 0x6860,
5565 0x6888, 0x6910, 0x8000 };
5566
5567 regs->version = 0;
5568
5569 memset(p, 0, BNX2_REGDUMP_LEN);
5570
5571 if (!netif_running(bp->dev))
5572 return;
5573
5574 i = 0;
5575 offset = reg_boundaries[0];
5576 p += offset;
5577 while (offset < BNX2_REGDUMP_LEN) {
5578 *p++ = REG_RD(bp, offset);
5579 offset += 4;
5580 if (offset == reg_boundaries[i + 1]) {
5581 offset = reg_boundaries[i + 2];
5582 p = (u32 *) (orig_p + offset);
5583 i += 2;
5584 }
5585 }
5586}
5587
b6016b76
MC
5588static void
5589bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5590{
972ec0d4 5591 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5592
5593 if (bp->flags & NO_WOL_FLAG) {
5594 wol->supported = 0;
5595 wol->wolopts = 0;
5596 }
5597 else {
5598 wol->supported = WAKE_MAGIC;
5599 if (bp->wol)
5600 wol->wolopts = WAKE_MAGIC;
5601 else
5602 wol->wolopts = 0;
5603 }
5604 memset(&wol->sopass, 0, sizeof(wol->sopass));
5605}
5606
5607static int
5608bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5609{
972ec0d4 5610 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5611
5612 if (wol->wolopts & ~WAKE_MAGIC)
5613 return -EINVAL;
5614
5615 if (wol->wolopts & WAKE_MAGIC) {
5616 if (bp->flags & NO_WOL_FLAG)
5617 return -EINVAL;
5618
5619 bp->wol = 1;
5620 }
5621 else {
5622 bp->wol = 0;
5623 }
5624 return 0;
5625}
5626
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  For SerDes PHYs the link is first forced down
 * (loopback) so the peer sees the restart; note phy_lock is dropped
 * around the msleep().
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY firmware handles the restart itself. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handled by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5669
5670static int
5671bnx2_get_eeprom_len(struct net_device *dev)
5672{
972ec0d4 5673 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5674
1122db71 5675 if (bp->flash_info == NULL)
b6016b76
MC
5676 return 0;
5677
1122db71 5678 return (int) bp->flash_size;
b6016b76
MC
5679}
5680
5681static int
5682bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5683 u8 *eebuf)
5684{
972ec0d4 5685 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5686 int rc;
5687
1064e944 5688 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5689
5690 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5691
5692 return rc;
5693}
5694
5695static int
5696bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5697 u8 *eebuf)
5698{
972ec0d4 5699 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5700 int rc;
5701
1064e944 5702 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5703
5704 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5705
5706 return rc;
5707}
5708
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick values and quick-consumer-index trip counts).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5730
/* ethtool set_coalesce: clamp the requested values to the hardware
 * limits (tick fields are 10-bit, trip counts 8-bit), store them, and
 * restart the NIC if it is running so the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 only supports statistics updates off or once per second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart so the host coalescing block is reprogrammed. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5779
/* ethtool get_ringparam: report RX/TX descriptor ring limits and the
 * currently configured sizes.  Mini and jumbo rings are not used.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
5796
/* ethtool set_ringparam: resize the RX/TX rings.  The TX ring must be
 * larger than MAX_SKB_FRAGS so a maximally fragmented skb always fits.
 * If the device is running it is torn down, the rings reallocated, and
 * the device restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the device is left
		 * stopped with no rings allocated — caller sees the error
		 * but the interface stays down until the next open.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5830
5831static void
5832bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5833{
972ec0d4 5834 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5835
5836 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5837 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5838 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5839}
5840
/* ethtool set_pauseparam: record the requested RX/TX pause settings and
 * flow-control autoneg mode, then reprogram the PHY to apply them.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5867
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5875
/* ethtool set_rx_csum: enable/disable RX checksum offload.  Only a
 * software flag is changed; it is consulted in the RX path.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5884
b11d6213
MC
/* ethtool set_tso: toggle TCP segmentation offload.  TSO6 is only
 * offered on the 5709, which supports IPv6 segmentation in hardware.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
5899
#define BNX2_NUM_STATS 46

/* Counter names reported by ethtool -S.  The order here must match
 * bnx2_stats_offset_arr and the per-chip length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* 32-bit word offset of a counter within the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset (in 32-bit words) of each exported counter; indexed in the
 * same order as bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Per-counter byte widths on 5708+; carrier-sense errors are valid here. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-tests, in the order bnx2_self_test()
 * fills in its result array.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6035
6036static int
b9f2c044 6037bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6038{
b9f2c044
JG
6039 switch (sset) {
6040 case ETH_SS_TEST:
6041 return BNX2_NUM_TESTS;
6042 case ETH_SS_STATS:
6043 return BNX2_NUM_STATS;
6044 default:
6045 return -EOPNOTSUPP;
6046 }
b6016b76
MC
6047}
6048
/* ethtool self_test: run the diagnostic suite.  Offline tests
 * (register/memory/loopback) require stopping the NIC and resetting the
 * chip in diagnostic mode; online tests (nvram/interrupt/link) run
 * either way.  buf[] entries and ETH_TEST_FL_FAILED report results in
 * the order of bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6104
6105static void
6106bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107{
6108 switch (stringset) {
6109 case ETH_SS_STATS:
6110 memcpy(buf, bnx2_stats_str_arr,
6111 sizeof(bnx2_stats_str_arr));
6112 break;
6113 case ETH_SS_TEST:
6114 memcpy(buf, bnx2_tests_str_arr,
6115 sizeof(bnx2_tests_str_arr));
6116 break;
6117 }
6118}
6119
b6016b76
MC
/* ethtool get_ethtool_stats: copy counters out of the DMA'd hardware
 * statistics block.  The per-chip length array says whether each
 * counter is 4 or 8 bytes wide, or 0 when skipped due to errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 steppings share the same errata list. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6160
/* ethtool phys_id: blink the port LEDs for `data` seconds (default 2)
 * so the physical port can be located.  Alternates between LED off and
 * all-override-on every 500ms; restores MISC_CFG when done.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Abort early if the user interrupts (e.g. ^C on ethtool). */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6194
4666f87a
MC
6195static int
6196bnx2_set_tx_csum(struct net_device *dev, u32 data)
6197{
6198 struct bnx2 *bp = netdev_priv(dev);
6199
6200 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6201 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6202 else
6203 return (ethtool_op_set_tx_csum(dev, data));
6204}
6205
/* ethtool operations table registered via dev->ethtool_ops. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6236
/* Called with rtnl_lock */
/* MII ioctl handler: read/write PHY registers on behalf of userspace
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Rejected when a remote PHY
 * is managed by firmware, or when the device is down (the MDIO bus is
 * not usable then).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writes can reconfigure the PHY; require admin rights. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6290
/* Called with rtnl_lock */
/* Set a new MAC address; program it into the chip immediately if the
 * interface is up, otherwise it is applied at the next open.
 */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
6307
/* Called with rtnl_lock */
/* Change the MTU within the chip's supported frame-size range; restart
 * the NIC so RX buffers are re-sized for the new MTU.
 */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
6328
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the interrupt handler with the device IRQ
 * disabled so netconsole/kgdboe can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6340
253c8b75
MC
/* Probe-time helper for the 5709: decide from the bond id and the media
 * strap bits whether this function uses SerDes or copper, and set
 * PHY_SERDES_FLAG accordingly.  The valid strap values differ between
 * PCI function 0 and function 1.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* copper-only bond option */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		/* SerDes-only bond option. */
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media part: consult the strap (or its software override). */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6378
883e5151
MC
/* Probe-time helper: detect PCI vs PCI-X mode, bus clock speed and bus
 * width from the chip's MISC_STATUS/CLOCK_CONTROL registers, recording
 * the results in bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 or 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6430
b6016b76
MC
6431static int __devinit
6432bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6433{
6434 struct bnx2 *bp;
6435 unsigned long mem_len;
58fc2ea4 6436 int rc, i, j;
b6016b76 6437 u32 reg;
40453c83 6438 u64 dma_mask, persist_dma_mask;
b6016b76 6439
b6016b76 6440 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6441 bp = netdev_priv(dev);
b6016b76
MC
6442
6443 bp->flags = 0;
6444 bp->phy_flags = 0;
6445
6446 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6447 rc = pci_enable_device(pdev);
6448 if (rc) {
898eb71c 6449 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
6450 goto err_out;
6451 }
6452
6453 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6454 dev_err(&pdev->dev,
2e8a538d 6455 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6456 rc = -ENODEV;
6457 goto err_out_disable;
6458 }
6459
6460 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6461 if (rc) {
9b91cf9d 6462 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6463 goto err_out_disable;
6464 }
6465
6466 pci_set_master(pdev);
6467
6468 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6469 if (bp->pm_cap == 0) {
9b91cf9d 6470 dev_err(&pdev->dev,
2e8a538d 6471 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6472 rc = -EIO;
6473 goto err_out_release;
6474 }
6475
b6016b76
MC
6476 bp->dev = dev;
6477 bp->pdev = pdev;
6478
6479 spin_lock_init(&bp->phy_lock);
1b8227c4 6480 spin_lock_init(&bp->indirect_lock);
c4028958 6481 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6482
6483 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6484 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6485 dev->mem_end = dev->mem_start + mem_len;
6486 dev->irq = pdev->irq;
6487
6488 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6489
6490 if (!bp->regview) {
9b91cf9d 6491 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6492 rc = -ENOMEM;
6493 goto err_out_release;
6494 }
6495
6496 /* Configure byte swap and enable write to the reg_window registers.
6497 * Rely on CPU to do target byte swapping on big endian systems
6498 * The chip's target access swapping will not swap all accesses
6499 */
6500 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6501 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6502 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6503
829ca9a3 6504 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6505
6506 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6507
883e5151
MC
6508 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6509 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6510 dev_err(&pdev->dev,
6511 "Cannot find PCIE capability, aborting.\n");
6512 rc = -EIO;
6513 goto err_out_unmap;
6514 }
6515 bp->flags |= PCIE_FLAG;
6516 } else {
59b47d8a
MC
6517 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6518 if (bp->pcix_cap == 0) {
6519 dev_err(&pdev->dev,
6520 "Cannot find PCIX capability, aborting.\n");
6521 rc = -EIO;
6522 goto err_out_unmap;
6523 }
6524 }
6525
8e6a72c4
MC
6526 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6527 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6528 bp->flags |= MSI_CAP_FLAG;
6529 }
6530
40453c83
MC
6531 /* 5708 cannot support DMA addresses > 40-bit. */
6532 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6533 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6534 else
6535 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6536
6537 /* Configure DMA attributes. */
6538 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6539 dev->features |= NETIF_F_HIGHDMA;
6540 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6541 if (rc) {
6542 dev_err(&pdev->dev,
6543 "pci_set_consistent_dma_mask failed, aborting.\n");
6544 goto err_out_unmap;
6545 }
6546 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6547 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6548 goto err_out_unmap;
6549 }
6550
883e5151
MC
6551 if (!(bp->flags & PCIE_FLAG))
6552 bnx2_get_pci_speed(bp);
b6016b76
MC
6553
6554 /* 5706A0 may falsely detect SERR and PERR. */
6555 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6556 reg = REG_RD(bp, PCI_COMMAND);
6557 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6558 REG_WR(bp, PCI_COMMAND, reg);
6559 }
6560 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6561 !(bp->flags & PCIX_FLAG)) {
6562
9b91cf9d 6563 dev_err(&pdev->dev,
2e8a538d 6564 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6565 goto err_out_unmap;
6566 }
6567
6568 bnx2_init_nvram(bp);
6569
e3648b3d
MC
6570 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6571
6572 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6573 BNX2_SHM_HDR_SIGNATURE_SIG) {
6574 u32 off = PCI_FUNC(pdev->devfn) << 2;
6575
6576 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6577 } else
e3648b3d
MC
6578 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6579
b6016b76
MC
6580 /* Get the permanent MAC address. First we need to make sure the
6581 * firmware is actually running.
6582 */
e3648b3d 6583 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6584
6585 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6586 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6587 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6588 rc = -ENODEV;
6589 goto err_out_unmap;
6590 }
6591
58fc2ea4
MC
6592 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6593 for (i = 0, j = 0; i < 3; i++) {
6594 u8 num, k, skip0;
6595
6596 num = (u8) (reg >> (24 - (i * 8)));
6597 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6598 if (num >= k || !skip0 || k == 1) {
6599 bp->fw_version[j++] = (num / k) + '0';
6600 skip0 = 0;
6601 }
6602 }
6603 if (i != 2)
6604 bp->fw_version[j++] = '.';
6605 }
846f5c62
MC
6606 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6607 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6608 bp->wol = 1;
6609
6610 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
c2d3db8c
MC
6611 bp->flags |= ASF_ENABLE_FLAG;
6612
6613 for (i = 0; i < 30; i++) {
6614 reg = REG_RD_IND(bp, bp->shmem_base +
6615 BNX2_BC_STATE_CONDITION);
6616 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6617 break;
6618 msleep(10);
6619 }
6620 }
58fc2ea4
MC
6621 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6622 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6623 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6624 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6625 int i;
6626 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6627
6628 bp->fw_version[j++] = ' ';
6629 for (i = 0; i < 3; i++) {
6630 reg = REG_RD_IND(bp, addr + i * 4);
6631 reg = swab32(reg);
6632 memcpy(&bp->fw_version[j], &reg, 4);
6633 j += 4;
6634 }
6635 }
b6016b76 6636
e3648b3d 6637 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6638 bp->mac_addr[0] = (u8) (reg >> 8);
6639 bp->mac_addr[1] = (u8) reg;
6640
e3648b3d 6641 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6642 bp->mac_addr[2] = (u8) (reg >> 24);
6643 bp->mac_addr[3] = (u8) (reg >> 16);
6644 bp->mac_addr[4] = (u8) (reg >> 8);
6645 bp->mac_addr[5] = (u8) reg;
6646
6647 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6648 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6649
6650 bp->rx_csum = 1;
6651
6652 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6653
6654 bp->tx_quick_cons_trip_int = 20;
6655 bp->tx_quick_cons_trip = 20;
6656 bp->tx_ticks_int = 80;
6657 bp->tx_ticks = 80;
6aa20a22 6658
b6016b76
MC
6659 bp->rx_quick_cons_trip_int = 6;
6660 bp->rx_quick_cons_trip = 6;
6661 bp->rx_ticks_int = 18;
6662 bp->rx_ticks = 18;
6663
7ea6920e 6664 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6665
6666 bp->timer_interval = HZ;
cd339a0e 6667 bp->current_interval = HZ;
b6016b76 6668
5b0c76ad
MC
6669 bp->phy_addr = 1;
6670
b6016b76 6671 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6672 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6673 bnx2_get_5709_media(bp);
6674 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6675 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6676
0d8a6571 6677 bp->phy_port = PORT_TP;
bac0dff6 6678 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6679 bp->phy_port = PORT_FIBRE;
846f5c62
MC
6680 reg = REG_RD_IND(bp, bp->shmem_base +
6681 BNX2_SHARED_HW_CFG_CONFIG);
6682 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6683 bp->flags |= NO_WOL_FLAG;
6684 bp->wol = 0;
6685 }
bac0dff6 6686 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6687 bp->phy_addr = 2;
5b0c76ad
MC
6688 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6689 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6690 }
0d8a6571
MC
6691 bnx2_init_remote_phy(bp);
6692
261dd5ca
MC
6693 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6694 CHIP_NUM(bp) == CHIP_NUM_5708)
6695 bp->phy_flags |= PHY_CRC_FIX_FLAG;
fb0c18bd
MC
6696 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6697 (CHIP_REV(bp) == CHIP_REV_Ax ||
6698 CHIP_REV(bp) == CHIP_REV_Bx))
b659f44e 6699 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6700
16088272
MC
6701 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6702 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 6703 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
dda1e390 6704 bp->flags |= NO_WOL_FLAG;
846f5c62
MC
6705 bp->wol = 0;
6706 }
dda1e390 6707
b6016b76
MC
6708 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6709 bp->tx_quick_cons_trip_int =
6710 bp->tx_quick_cons_trip;
6711 bp->tx_ticks_int = bp->tx_ticks;
6712 bp->rx_quick_cons_trip_int =
6713 bp->rx_quick_cons_trip;
6714 bp->rx_ticks_int = bp->rx_ticks;
6715 bp->comp_prod_trip_int = bp->comp_prod_trip;
6716 bp->com_ticks_int = bp->com_ticks;
6717 bp->cmd_ticks_int = bp->cmd_ticks;
6718 }
6719
f9317a40
MC
6720 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6721 *
6722 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6723 * with byte enables disabled on the unused 32-bit word. This is legal
6724 * but causes problems on the AMD 8132 which will eventually stop
6725 * responding after a while.
6726 *
6727 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6728 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6729 */
6730 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6731 struct pci_dev *amd_8132 = NULL;
6732
6733 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6734 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6735 amd_8132))) {
f9317a40 6736
44c10138
AK
6737 if (amd_8132->revision >= 0x10 &&
6738 amd_8132->revision <= 0x13) {
f9317a40
MC
6739 disable_msi = 1;
6740 pci_dev_put(amd_8132);
6741 break;
6742 }
6743 }
6744 }
6745
deaf391b 6746 bnx2_set_default_link(bp);
b6016b76
MC
6747 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6748
cd339a0e
MC
6749 init_timer(&bp->timer);
6750 bp->timer.expires = RUN_AT(bp->timer_interval);
6751 bp->timer.data = (unsigned long) bp;
6752 bp->timer.function = bnx2_timer;
6753
b6016b76
MC
6754 return 0;
6755
6756err_out_unmap:
6757 if (bp->regview) {
6758 iounmap(bp->regview);
73eef4cd 6759 bp->regview = NULL;
b6016b76
MC
6760 }
6761
6762err_out_release:
6763 pci_release_regions(pdev);
6764
6765err_out_disable:
6766 pci_disable_device(pdev);
6767 pci_set_drvdata(pdev, NULL);
6768
6769err_out:
6770 return rc;
6771}
6772
883e5151
MC
6773static char * __devinit
6774bnx2_bus_string(struct bnx2 *bp, char *str)
6775{
6776 char *s = str;
6777
6778 if (bp->flags & PCIE_FLAG) {
6779 s += sprintf(s, "PCI Express");
6780 } else {
6781 s += sprintf(s, "PCI");
6782 if (bp->flags & PCIX_FLAG)
6783 s += sprintf(s, "-X");
6784 if (bp->flags & PCI_32BIT_FLAG)
6785 s += sprintf(s, " 32-bit");
6786 else
6787 s += sprintf(s, " 64-bit");
6788 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6789 }
6790 return str;
6791}
6792
b6016b76
MC
/* PCI probe entry point.  Allocates the net_device, runs the board
 * initialization (bnx2_init_board), wires up the netdev operations and
 * offload feature flags, and registers the interface with the network
 * stack.  On any failure everything acquired so far is torn down.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* Hook up the netdev operations (pre-net_device_ops era style). */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	/* NAPI poll with a weight (budget) of 64 packets per poll. */
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum/TSO offload features; 5709 additionally handles IPv6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
6880
/* PCI remove callback: tear down in the reverse order of bnx2_init_one().
 * Pending work is flushed first so bnx2_reset_task() cannot run against
 * a device that is being freed.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure any queued reset_task has completed before unregister. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6899
/* PCI suspend callback.  Always saves PCI config state; if the interface
 * is up it also quiesces the NIC, informs the bootcode how the driver is
 * going down (link-down / WOL / no-WOL reset code) and enters the
 * requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, traffic and the periodic timer before reset. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code matching the WOL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6930
/* PCI resume callback: restore config space, return the chip to D0 and,
 * if the interface was running at suspend time, reinitialize the NIC and
 * restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: the chip was reset on suspend. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6947
/* PCI driver glue: probe/remove and power-management entry points. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6956
/* Module init: register the PCI driver; probing happens per-device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6961
/* Module exit: unregister the PCI driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6966
/* Register the module's load/unload entry points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);


