/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.0.9"
#define DRV_MODULE_RELDATE	"April 27, 2010"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j9.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);

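/* Return the number of free TX descriptors. The memory barrier pairs
 * with the barrier in the TX completion path so that tx_prod and
 * tx_cons updates made on another CPU are visible before the ring
 * occupancy is computed.
 */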
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries; one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

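/* Indirect register access goes through the PCICFG window: the address
 * write and the data read/write must stay paired under indirect_lock
 * since the window is shared with the other indirect accessors below.
 */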
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}

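/* Write a context cell. The 5709 posts the write through a request
 * register and is polled briefly for completion; older chips take a
 * simple address/data register pair.
 */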
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

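/* Tell the CNIC driver which IRQ vector and status block to use. With
 * MSI-X, CNIC gets the vector after the ethernet vectors and its own
 * status block; otherwise it shares vector 0 and status block 0.
 */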
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

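/* Read a PHY register over the EMAC MDIO interface. Hardware
 * auto-polling of the PHY must be paused around a manual access, and
 * the COMM register is polled (up to ~500 us) for START_BUSY to clear.
 */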
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

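/* Quiesce the device: stop CNIC first if requested, then NAPI and the
 * TX queues, then mask and synchronize the interrupts. trans_start is
 * refreshed so the netdev watchdog does not report a spurious TX
 * timeout while the queues are disabled.
 */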
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

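/* Allocate the DMA memory shared with the hardware. The status
 * block(s) and the statistics block live in one coherent allocation;
 * with MSI-X, each vector's status block is carved out of that region
 * at BNX2_SBLK_MSIX_ALIGN_SIZE intervals.
 */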
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

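/* Resolve TX/RX pause settings from the local and remote
 * advertisements. SerDes (1000Base-X) pause bits are first mapped onto
 * their copper equivalents so a single resolution path, following
 * Table 28B-3 of the 802.3 spec, serves both media.
 */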
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

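/* Program the RX buffer descriptor context. On the 5709 this also sets
 * the low/high watermarks used to generate pause frames when TX flow
 * control is on; the watermarks are scaled down and clamped to the
 * 4-bit field the context provides.
 */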
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

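/* Force 2.5G operation on a 2.5G-capable SerDes PHY. The 5709 uses the
 * SERDES_DIG_MISC1 register, the 5708 a BMCR override bit; other chips
 * have no forced 2.5G mode and are left untouched.
 */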
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

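/* Re-evaluate the link state after a link interrupt or timer tick.
 * BMSR is read twice because the link status bit is latched low; the
 * second read returns the current state.
 */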
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

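/* Remote-PHY setup: rather than programming the PHY directly, encode
 * the requested speed/duplex/pause settings into a NETLINK_SET_LINK
 * word and hand it to the management firmware. phy_lock is dropped
 * around bnx2_fw_sync() because the firmware handshake sleeps.
 */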
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}

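/* Decode a link status word posted by the management firmware. Note
 * the intentional fall-throughs in the switch below: each _HALF case
 * sets half duplex and falls into the matching _FULL case to set the
 * common line speed.
 */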
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

2031 static int
2032 bnx2_set_remote_link(struct bnx2 *bp)
2033 {
2034 u32 evt_code;
2035
2036 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2037 switch (evt_code) {
2038 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039 bnx2_remote_phy_event(bp);
2040 break;
2041 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042 default:
2043 bnx2_send_heart_beat(bp);
2044 break;
2045 }
2046 return 0;
2047 }
2048
2049 static int
2050 bnx2_setup_copper_phy(struct bnx2 *bp)
2051 __releases(&bp->phy_lock)
2052 __acquires(&bp->phy_lock)
2053 {
2054 u32 bmcr;
2055 u32 new_bmcr;
2056
2057 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2058
2059 if (bp->autoneg & AUTONEG_SPEED) {
2060 u32 adv_reg, adv1000_reg;
2061 u32 new_adv_reg = 0;
2062 u32 new_adv1000_reg = 0;
2063
2064 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2065 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2066 ADVERTISE_PAUSE_ASYM);
2067
2068 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2069 adv1000_reg &= PHY_ALL_1000_SPEED;
2070
2071 if (bp->advertising & ADVERTISED_10baseT_Half)
2072 new_adv_reg |= ADVERTISE_10HALF;
2073 if (bp->advertising & ADVERTISED_10baseT_Full)
2074 new_adv_reg |= ADVERTISE_10FULL;
2075 if (bp->advertising & ADVERTISED_100baseT_Half)
2076 new_adv_reg |= ADVERTISE_100HALF;
2077 if (bp->advertising & ADVERTISED_100baseT_Full)
2078 new_adv_reg |= ADVERTISE_100FULL;
2079 if (bp->advertising & ADVERTISED_1000baseT_Full)
2080 new_adv1000_reg |= ADVERTISE_1000FULL;
2081
2082 new_adv_reg |= ADVERTISE_CSMA;
2083
2084 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2085
2086 if ((adv1000_reg != new_adv1000_reg) ||
2087 (adv_reg != new_adv_reg) ||
2088 ((bmcr & BMCR_ANENABLE) == 0)) {
2089
2090 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2091 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2092 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2093 BMCR_ANENABLE);
2094 }
2095 else if (bp->link_up) {
2096 /* Flow ctrl may have changed from auto to forced,
2097 * or vice-versa. */
2098
2099 bnx2_resolve_flow_ctrl(bp);
2100 bnx2_set_mac_link(bp);
2101 }
2102 return 0;
2103 }
2104
2105 new_bmcr = 0;
2106 if (bp->req_line_speed == SPEED_100) {
2107 new_bmcr |= BMCR_SPEED100;
2108 }
2109 if (bp->req_duplex == DUPLEX_FULL) {
2110 new_bmcr |= BMCR_FULLDPLX;
2111 }
2112 if (new_bmcr != bmcr) {
2113 u32 bmsr;
2114
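/* BMSR latches link-down events; read it twice so the second
 * read reflects the current link state.
 */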
2115 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117
2118 if (bmsr & BMSR_LSTATUS) {
2119 /* Force link down */
2120 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2121 spin_unlock_bh(&bp->phy_lock);
2122 msleep(50);
2123 spin_lock_bh(&bp->phy_lock);
2124
2125 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2127 }
2128
2129 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2130
2131 /* Normally, the new speed is set up after the link has
2132 * gone down and up again. In some cases, link will not go
2133 * down so we need to set up the new speed here.
2134 */
2135 if (bmsr & BMSR_LSTATUS) {
2136 bp->line_speed = bp->req_line_speed;
2137 bp->duplex = bp->req_duplex;
2138 bnx2_resolve_flow_ctrl(bp);
2139 bnx2_set_mac_link(bp);
2140 }
2141 } else {
2142 bnx2_resolve_flow_ctrl(bp);
2143 bnx2_set_mac_link(bp);
2144 }
2145 return 0;
2146 }
2147
2148 static int
2149 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2150 __releases(&bp->phy_lock)
2151 __acquires(&bp->phy_lock)
2152 {
2153 if (bp->loopback == MAC_LOOPBACK)
2154 return 0;
2155
2156 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2157 return (bnx2_setup_serdes_phy(bp, port));
2158 }
2159 else {
2160 return (bnx2_setup_copper_phy(bp));
2161 }
2162 }
2163
2164 static int
2165 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2166 {
2167 u32 val;
2168
2169 bp->mii_bmcr = MII_BMCR + 0x10;
2170 bp->mii_bmsr = MII_BMSR + 0x10;
2171 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2172 bp->mii_adv = MII_ADVERTISE + 0x10;
2173 bp->mii_lpa = MII_LPA + 0x10;
2174 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2175
2176 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2177 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2178
2179 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2180 if (reset_phy)
2181 bnx2_reset_phy(bp);
2182
2183 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2184
2185 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2186 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2187 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2188 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2189
2190 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2191 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2192 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2193 val |= BCM5708S_UP1_2G5;
2194 else
2195 val &= ~BCM5708S_UP1_2G5;
2196 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2197
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2199 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2200 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2201 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2202
2203 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2204
2205 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2206 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2207 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2208
2209 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2210
2211 return 0;
2212 }
2213
2214 static int
2215 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2216 {
2217 u32 val;
2218
2219 if (reset_phy)
2220 bnx2_reset_phy(bp);
2221
2222 bp->mii_up1 = BCM5708S_UP1;
2223
2224 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2225 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2226 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2227
2228 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2229 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2230 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2231
2232 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2233 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2234 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2235
2236 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2237 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2238 val |= BCM5708S_UP1_2G5;
2239 bnx2_write_phy(bp, BCM5708S_UP1, val);
2240 }
2241
2242 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2243 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2244 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2245 /* increase tx signal amplitude */
2246 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2247 BCM5708S_BLK_ADDR_TX_MISC);
2248 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2249 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2250 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2252 }
2253
2254 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2255 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2256
2257 if (val) {
2258 u32 is_backplane;
2259
2260 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2261 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2262 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 BCM5708S_BLK_ADDR_TX_MISC);
2264 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2265 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2266 BCM5708S_BLK_ADDR_DIG);
2267 }
2268 }
2269 return 0;
2270 }
2271
2272 static int
2273 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2274 {
2275 if (reset_phy)
2276 bnx2_reset_phy(bp);
2277
2278 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2279
2280 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2281 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2282
2283 if (bp->dev->mtu > 1500) {
2284 u32 val;
2285
2286 /* Set extended packet length bit */
2287 bnx2_write_phy(bp, 0x18, 0x7);
2288 bnx2_read_phy(bp, 0x18, &val);
2289 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2290
2291 bnx2_write_phy(bp, 0x1c, 0x6c00);
2292 bnx2_read_phy(bp, 0x1c, &val);
2293 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2294 }
2295 else {
2296 u32 val;
2297
2298 bnx2_write_phy(bp, 0x18, 0x7);
2299 bnx2_read_phy(bp, 0x18, &val);
2300 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2301
2302 bnx2_write_phy(bp, 0x1c, 0x6c00);
2303 bnx2_read_phy(bp, 0x1c, &val);
2304 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2305 }
2306
2307 return 0;
2308 }
2309
2310 static int
2311 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2312 {
2313 u32 val;
2314
2315 if (reset_phy)
2316 bnx2_reset_phy(bp);
2317
2318 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2319 bnx2_write_phy(bp, 0x18, 0x0c00);
2320 bnx2_write_phy(bp, 0x17, 0x000a);
2321 bnx2_write_phy(bp, 0x15, 0x310b);
2322 bnx2_write_phy(bp, 0x17, 0x201f);
2323 bnx2_write_phy(bp, 0x15, 0x9506);
2324 bnx2_write_phy(bp, 0x17, 0x401f);
2325 bnx2_write_phy(bp, 0x15, 0x14e2);
2326 bnx2_write_phy(bp, 0x18, 0x0400);
2327 }
2328
2329 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2330 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2331 MII_BNX2_DSP_EXPAND_REG | 0x8);
2332 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2333 val &= ~(1 << 8);
2334 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2335 }
2336
2337 if (bp->dev->mtu > 1500) {
2338 /* Set extended packet length bit */
2339 bnx2_write_phy(bp, 0x18, 0x7);
2340 bnx2_read_phy(bp, 0x18, &val);
2341 bnx2_write_phy(bp, 0x18, val | 0x4000);
2342
2343 bnx2_read_phy(bp, 0x10, &val);
2344 bnx2_write_phy(bp, 0x10, val | 0x1);
2345 }
2346 else {
2347 bnx2_write_phy(bp, 0x18, 0x7);
2348 bnx2_read_phy(bp, 0x18, &val);
2349 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2350
2351 bnx2_read_phy(bp, 0x10, &val);
2352 bnx2_write_phy(bp, 0x10, val & ~0x1);
2353 }
2354
2355 /* ethernet@wirespeed */
2356 bnx2_write_phy(bp, 0x18, 0x7007);
2357 bnx2_read_phy(bp, 0x18, &val);
2358 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2359 return 0;
2360 }
2361
2362
2363 static int
2364 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2365 __releases(&bp->phy_lock)
2366 __acquires(&bp->phy_lock)
2367 {
2368 u32 val;
2369 int rc = 0;
2370
2371 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2372 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2373
2374 bp->mii_bmcr = MII_BMCR;
2375 bp->mii_bmsr = MII_BMSR;
2376 bp->mii_bmsr1 = MII_BMSR;
2377 bp->mii_adv = MII_ADVERTISE;
2378 bp->mii_lpa = MII_LPA;
2379
2380 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2381
2382 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2383 goto setup_phy;
2384
2385 bnx2_read_phy(bp, MII_PHYSID1, &val);
2386 bp->phy_id = val << 16;
2387 bnx2_read_phy(bp, MII_PHYSID2, &val);
2388 bp->phy_id |= val & 0xffff;
2389
2390 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2391 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2392 rc = bnx2_init_5706s_phy(bp, reset_phy);
2393 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2394 rc = bnx2_init_5708s_phy(bp, reset_phy);
2395 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2396 rc = bnx2_init_5709s_phy(bp, reset_phy);
2397 }
2398 else {
2399 rc = bnx2_init_copper_phy(bp, reset_phy);
2400 }
2401
2402 setup_phy:
2403 if (!rc)
2404 rc = bnx2_setup_phy(bp, bp->phy_port);
2405
2406 return rc;
2407 }
2408
2409 static int
2410 bnx2_set_mac_loopback(struct bnx2 *bp)
2411 {
2412 u32 mac_mode;
2413
2414 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418 bp->link_up = 1;
2419 return 0;
2420 }
2421
2422 static int bnx2_test_link(struct bnx2 *);
2423
2424 static int
2425 bnx2_set_phy_loopback(struct bnx2 *bp)
2426 {
2427 u32 mac_mode;
2428 int rc, i;
2429
2430 spin_lock_bh(&bp->phy_lock);
2431 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2432 BMCR_SPEED1000);
2433 spin_unlock_bh(&bp->phy_lock);
2434 if (rc)
2435 return rc;
2436
2437 for (i = 0; i < 10; i++) {
2438 if (bnx2_test_link(bp) == 0)
2439 break;
2440 msleep(100);
2441 }
2442
2443 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2444 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2445 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2446 BNX2_EMAC_MODE_25G_MODE);
2447
2448 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2449 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2450 bp->link_up = 1;
2451 return 0;
2452 }
2453
2454 static int
2455 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2456 {
2457 int i;
2458 u32 val;
2459
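/* Handshake with the bootcode: post the message with a new sequence
 * number in the driver mailbox, then poll the firmware mailbox until
 * the ACK field echoes the same sequence number.
 */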
2460 bp->fw_wr_seq++;
2461 msg_data |= bp->fw_wr_seq;
2462
2463 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2464
2465 if (!ack)
2466 return 0;
2467
2468 /* wait for an acknowledgement. */
2469 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2470 msleep(10);
2471
2472 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2473
2474 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2475 break;
2476 }
2477 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2478 return 0;
2479
2480 /* If we timed out, inform the firmware that this is the case. */
2481 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2482 if (!silent)
2483 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2484
2485 msg_data &= ~BNX2_DRV_MSG_CODE;
2486 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2487
2488 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2489
2490 return -EBUSY;
2491 }
2492
2493 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2494 return -EIO;
2495
2496 return 0;
2497 }
2498
2499 static int
2500 bnx2_init_5709_context(struct bnx2 *bp)
2501 {
2502 int i, ret = 0;
2503 u32 val;
2504
2505 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2506 val |= (BCM_PAGE_BITS - 8) << 16;
2507 REG_WR(bp, BNX2_CTX_COMMAND, val);
2508 for (i = 0; i < 10; i++) {
2509 val = REG_RD(bp, BNX2_CTX_COMMAND);
2510 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2511 break;
2512 udelay(2);
2513 }
2514 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2515 return -EBUSY;
2516
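/* Program each context page into the chip's host page table, waiting
 * for the WRITE_REQ bit to clear after each entry.
 */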
2517 for (i = 0; i < bp->ctx_pages; i++) {
2518 int j;
2519
2520 if (bp->ctx_blk[i])
2521 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2522 else
2523 return -ENOMEM;
2524
2525 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2526 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2527 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2528 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2529 (u64) bp->ctx_blk_mapping[i] >> 32);
2530 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2531 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2532 for (j = 0; j < 10; j++) {
2533
2534 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2535 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2536 break;
2537 udelay(5);
2538 }
2539 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2540 ret = -EBUSY;
2541 break;
2542 }
2543 }
2544 return ret;
2545 }
2546
2547 static void
2548 bnx2_init_context(struct bnx2 *bp)
2549 {
2550 u32 vcid;
2551
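/* Zero all 96 contexts.  On the 5706 A0, some context IDs map to
 * different physical context memory, so compute the remapped page
 * address before clearing.
 */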
2552 vcid = 96;
2553 while (vcid) {
2554 u32 vcid_addr, pcid_addr, offset;
2555 int i;
2556
2557 vcid--;
2558
2559 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2560 u32 new_vcid;
2561
2562 vcid_addr = GET_PCID_ADDR(vcid);
2563 if (vcid & 0x8) {
2564 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2565 }
2566 else {
2567 new_vcid = vcid;
2568 }
2569 pcid_addr = GET_PCID_ADDR(new_vcid);
2570 }
2571 else {
2572 vcid_addr = GET_CID_ADDR(vcid);
2573 pcid_addr = vcid_addr;
2574 }
2575
2576 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2577 vcid_addr += (i << PHY_CTX_SHIFT);
2578 pcid_addr += (i << PHY_CTX_SHIFT);
2579
2580 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2581 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2582
2583 /* Zero out the context. */
2584 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2585 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2586 }
2587 }
2588 }
2589
2590 static int
2591 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2592 {
2593 u16 *good_mbuf;
2594 u32 good_mbuf_cnt;
2595 u32 val;
2596
2597 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2598 if (good_mbuf == NULL) {
2599 pr_err("Failed to allocate memory in %s\n", __func__);
2600 return -ENOMEM;
2601 }
2602
2603 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2604 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2605
2606 good_mbuf_cnt = 0;
2607
2608 /* Allocate a bunch of mbufs and save the good ones in an array. */
2609 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2610 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2611 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2612 BNX2_RBUF_COMMAND_ALLOC_REQ);
2613
2614 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2615
2616 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2617
2618 /* The addresses with Bit 9 set are bad memory blocks. */
2619 if (!(val & (1 << 9))) {
2620 good_mbuf[good_mbuf_cnt] = (u16) val;
2621 good_mbuf_cnt++;
2622 }
2623
2624 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2625 }
2626
2627 /* Free the good ones back to the mbuf pool thus discarding
2628 * all the bad ones. */
2629 while (good_mbuf_cnt) {
2630 good_mbuf_cnt--;
2631
2632 val = good_mbuf[good_mbuf_cnt];
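/* Build the free command for this mbuf cluster.  The exact layout is
 * firmware-defined; the cluster index appears in both halves of the
 * word, with bit 0 apparently acting as a valid bit.
 */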
2633 val = (val << 9) | val | 1;
2634
2635 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2636 }
2637 kfree(good_mbuf);
2638 return 0;
2639 }
2640
2641 static void
2642 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2643 {
2644 u32 val;
2645
2646 val = (mac_addr[0] << 8) | mac_addr[1];
2647
2648 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2649
2650 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2651 (mac_addr[4] << 8) | mac_addr[5];
2652
2653 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2654 }
2655
2656 static inline int
2657 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2658 {
2659 dma_addr_t mapping;
2660 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2661 struct rx_bd *rxbd =
2662 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2663 struct page *page = alloc_page(GFP_ATOMIC);
2664
2665 if (!page)
2666 return -ENOMEM;
2667 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2668 PCI_DMA_FROMDEVICE);
2669 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2670 __free_page(page);
2671 return -EIO;
2672 }
2673
2674 rx_pg->page = page;
2675 pci_unmap_addr_set(rx_pg, mapping, mapping);
2676 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2677 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2678 return 0;
2679 }
2680
2681 static void
2682 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2683 {
2684 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2685 struct page *page = rx_pg->page;
2686
2687 if (!page)
2688 return;
2689
2690 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691 PCI_DMA_FROMDEVICE);
2692
2693 __free_page(page);
2694 rx_pg->page = NULL;
2695 }
2696
2697 static inline int
2698 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2699 {
2700 struct sk_buff *skb;
2701 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2702 dma_addr_t mapping;
2703 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2704 unsigned long align;
2705
2706 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2707 if (skb == NULL) {
2708 return -ENOMEM;
2709 }
2710
2711 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2712 skb_reserve(skb, BNX2_RX_ALIGN - align);
2713
2714 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2715 PCI_DMA_FROMDEVICE);
2716 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2717 dev_kfree_skb(skb);
2718 return -EIO;
2719 }
2720
2721 rx_buf->skb = skb;
2722 pci_unmap_addr_set(rx_buf, mapping, mapping);
2723
2724 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2725 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2726
2727 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2728
2729 return 0;
2730 }
2731
2732 static int
2733 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2734 {
2735 struct status_block *sblk = bnapi->status_blk.msi;
2736 u32 new_link_state, old_link_state;
2737 int is_set = 1;
2738
2739 new_link_state = sblk->status_attn_bits & event;
2740 old_link_state = sblk->status_attn_bits_ack & event;
2741 if (new_link_state != old_link_state) {
2742 if (new_link_state)
2743 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2744 else
2745 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2746 } else
2747 is_set = 0;
2748
2749 return is_set;
2750 }
2751
2752 static void
2753 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2754 {
2755 spin_lock(&bp->phy_lock);
2756
2757 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2758 bnx2_set_link(bp);
2759 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2760 bnx2_set_remote_link(bp);
2761
2762 spin_unlock(&bp->phy_lock);
2763
2764 }
2765
2766 static inline u16
2767 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2768 {
2769 u16 cons;
2770
2771 /* Tell compiler that status block fields can change. */
2772 barrier();
2773 cons = *bnapi->hw_tx_cons_ptr;
2774 barrier();
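/* The last BD of each ring page is a next-page pointer that the
 * hardware index counts but software never uses, so skip over it.
 */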
2775 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2776 cons++;
2777 return cons;
2778 }
2779
2780 static int
2781 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2782 {
2783 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2784 u16 hw_cons, sw_cons, sw_ring_cons;
2785 int tx_pkt = 0, index;
2786 struct netdev_queue *txq;
2787
2788 index = (bnapi - bp->bnx2_napi);
2789 txq = netdev_get_tx_queue(bp->dev, index);
2790
2791 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2792 sw_cons = txr->tx_cons;
2793
2794 while (sw_cons != hw_cons) {
2795 struct sw_tx_bd *tx_buf;
2796 struct sk_buff *skb;
2797 int i, last;
2798
2799 sw_ring_cons = TX_RING_IDX(sw_cons);
2800
2801 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2802 skb = tx_buf->skb;
2803
2804 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2805 prefetch(&skb->end);
2806
2807 /* partial BD completions possible with TSO packets */
2808 if (tx_buf->is_gso) {
2809 u16 last_idx, last_ring_idx;
2810
2811 last_idx = sw_cons + tx_buf->nr_frags + 1;
2812 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2813 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2814 last_idx++;
2815 }
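/* Signed 16-bit arithmetic handles index wrap-around: a positive
 * difference means the last BD of this GSO packet has not been
 * completed by the hardware yet.
 */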
2816 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2817 break;
2818 }
2819 }
2820
2821 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2822 skb_headlen(skb), PCI_DMA_TODEVICE);
2823
2824 tx_buf->skb = NULL;
2825 last = tx_buf->nr_frags;
2826
2827 for (i = 0; i < last; i++) {
2828 sw_cons = NEXT_TX_BD(sw_cons);
2829
2830 pci_unmap_page(bp->pdev,
2831 pci_unmap_addr(
2832 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2833 mapping),
2834 skb_shinfo(skb)->frags[i].size,
2835 PCI_DMA_TODEVICE);
2836 }
2837
2838 sw_cons = NEXT_TX_BD(sw_cons);
2839
2840 dev_kfree_skb(skb);
2841 tx_pkt++;
2842 if (tx_pkt == budget)
2843 break;
2844
2845 if (hw_cons == sw_cons)
2846 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2847 }
2848
2849 txr->hw_tx_cons = hw_cons;
2850 txr->tx_cons = sw_cons;
2851
2852 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2853 * before checking for netif_tx_queue_stopped(). Without the
2854 * memory barrier, there is a small possibility that bnx2_start_xmit()
2855 * will miss it and cause the queue to be stopped forever.
2856 */
2857 smp_mb();
2858
2859 if (unlikely(netif_tx_queue_stopped(txq)) &&
2860 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2861 __netif_tx_lock(txq, smp_processor_id());
2862 if ((netif_tx_queue_stopped(txq)) &&
2863 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2864 netif_tx_wake_queue(txq);
2865 __netif_tx_unlock(txq);
2866 }
2867
2868 return tx_pkt;
2869 }
2870
2871 static void
2872 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2873 struct sk_buff *skb, int count)
2874 {
2875 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2876 struct rx_bd *cons_bd, *prod_bd;
2877 int i;
2878 u16 hw_prod, prod;
2879 u16 cons = rxr->rx_pg_cons;
2880
2881 cons_rx_pg = &rxr->rx_pg_ring[cons];
2882
2883 /* The caller was unable to allocate a new page to replace the
2884 * last one in the frags array, so we need to recycle that page
2885 * and then free the skb.
2886 */
2887 if (skb) {
2888 struct page *page;
2889 struct skb_shared_info *shinfo;
2890
2891 shinfo = skb_shinfo(skb);
2892 shinfo->nr_frags--;
2893 page = shinfo->frags[shinfo->nr_frags].page;
2894 shinfo->frags[shinfo->nr_frags].page = NULL;
2895
2896 cons_rx_pg->page = page;
2897 dev_kfree_skb(skb);
2898 }
2899
2900 hw_prod = rxr->rx_pg_prod;
2901
2902 for (i = 0; i < count; i++) {
2903 prod = RX_PG_RING_IDX(hw_prod);
2904
2905 prod_rx_pg = &rxr->rx_pg_ring[prod];
2906 cons_rx_pg = &rxr->rx_pg_ring[cons];
2907 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2908 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2909
2910 if (prod != cons) {
2911 prod_rx_pg->page = cons_rx_pg->page;
2912 cons_rx_pg->page = NULL;
2913 pci_unmap_addr_set(prod_rx_pg, mapping,
2914 pci_unmap_addr(cons_rx_pg, mapping));
2915
2916 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2917 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2918
2919 }
2920 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2921 hw_prod = NEXT_RX_BD(hw_prod);
2922 }
2923 rxr->rx_pg_prod = hw_prod;
2924 rxr->rx_pg_cons = cons;
2925 }
2926
2927 static inline void
2928 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2929 struct sk_buff *skb, u16 cons, u16 prod)
2930 {
2931 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2932 struct rx_bd *cons_bd, *prod_bd;
2933
2934 cons_rx_buf = &rxr->rx_buf_ring[cons];
2935 prod_rx_buf = &rxr->rx_buf_ring[prod];
2936
2937 pci_dma_sync_single_for_device(bp->pdev,
2938 pci_unmap_addr(cons_rx_buf, mapping),
2939 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2940
2941 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2942
2943 prod_rx_buf->skb = skb;
2944
2945 if (cons == prod)
2946 return;
2947
2948 pci_unmap_addr_set(prod_rx_buf, mapping,
2949 pci_unmap_addr(cons_rx_buf, mapping));
2950
2951 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2952 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2953 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2954 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2955 }
2956
2957 static int
2958 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2959 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2960 u32 ring_idx)
2961 {
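/* ring_idx packs the consumer index in the upper 16 bits and the
 * producer index in the lower 16 bits.
 */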
2962 int err;
2963 u16 prod = ring_idx & 0xffff;
2964
2965 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2966 if (unlikely(err)) {
2967 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2968 if (hdr_len) {
2969 unsigned int raw_len = len + 4;
2970 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2971
2972 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2973 }
2974 return err;
2975 }
2976
2977 skb_reserve(skb, BNX2_RX_OFFSET);
2978 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2979 PCI_DMA_FROMDEVICE);
2980
2981 if (hdr_len == 0) {
2982 skb_put(skb, len);
2983 return 0;
2984 } else {
2985 unsigned int i, frag_len, frag_size, pages;
2986 struct sw_pg *rx_pg;
2987 u16 pg_cons = rxr->rx_pg_cons;
2988 u16 pg_prod = rxr->rx_pg_prod;
2989
2990 frag_size = len + 4 - hdr_len;
2991 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2992 skb_put(skb, hdr_len);
2993
2994 for (i = 0; i < pages; i++) {
2995 dma_addr_t mapping_old;
2996
2997 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
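/* The last 4 bytes of the frame are the FCS.  If the final page
 * fragment contains nothing but (part of) the FCS, trim those bytes
 * off the skb instead of attaching the page.
 */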
2998 if (unlikely(frag_len <= 4)) {
2999 unsigned int tail = 4 - frag_len;
3000
3001 rxr->rx_pg_cons = pg_cons;
3002 rxr->rx_pg_prod = pg_prod;
3003 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3004 pages - i);
3005 skb->len -= tail;
3006 if (i == 0) {
3007 skb->tail -= tail;
3008 } else {
3009 skb_frag_t *frag =
3010 &skb_shinfo(skb)->frags[i - 1];
3011 frag->size -= tail;
3012 skb->data_len -= tail;
3013 skb->truesize -= tail;
3014 }
3015 return 0;
3016 }
3017 rx_pg = &rxr->rx_pg_ring[pg_cons];
3018
3019 /* Don't unmap yet. If we're unable to allocate a new
3020 * page, we need to recycle the page and the DMA addr.
3021 */
3022 mapping_old = pci_unmap_addr(rx_pg, mapping);
3023 if (i == pages - 1)
3024 frag_len -= 4;
3025
3026 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3027 rx_pg->page = NULL;
3028
3029 err = bnx2_alloc_rx_page(bp, rxr,
3030 RX_PG_RING_IDX(pg_prod));
3031 if (unlikely(err)) {
3032 rxr->rx_pg_cons = pg_cons;
3033 rxr->rx_pg_prod = pg_prod;
3034 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3035 pages - i);
3036 return err;
3037 }
3038
3039 pci_unmap_page(bp->pdev, mapping_old,
3040 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3041
3042 frag_size -= frag_len;
3043 skb->data_len += frag_len;
3044 skb->truesize += frag_len;
3045 skb->len += frag_len;
3046
3047 pg_prod = NEXT_RX_BD(pg_prod);
3048 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3049 }
3050 rxr->rx_pg_prod = pg_prod;
3051 rxr->rx_pg_cons = pg_cons;
3052 }
3053 return 0;
3054 }
3055
3056 static inline u16
3057 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3058 {
3059 u16 cons;
3060
3061 /* Tell compiler that status block fields can change. */
3062 barrier();
3063 cons = *bnapi->hw_rx_cons_ptr;
3064 barrier();
3065 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3066 cons++;
3067 return cons;
3068 }
3069
3070 static int
3071 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3072 {
3073 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3074 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3075 struct l2_fhdr *rx_hdr;
3076 int rx_pkt = 0, pg_ring_used = 0;
3077
3078 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3079 sw_cons = rxr->rx_cons;
3080 sw_prod = rxr->rx_prod;
3081
3082 /* Memory barrier necessary as speculative reads of the rx
3083 * buffer can be ahead of the index in the status block
3084 */
3085 rmb();
3086 while (sw_cons != hw_cons) {
3087 unsigned int len, hdr_len;
3088 u32 status;
3089 struct sw_bd *rx_buf;
3090 struct sk_buff *skb;
3091 dma_addr_t dma_addr;
3092 u16 vtag = 0;
3093 int hw_vlan __maybe_unused = 0;
3094
3095 sw_ring_cons = RX_RING_IDX(sw_cons);
3096 sw_ring_prod = RX_RING_IDX(sw_prod);
3097
3098 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3099 skb = rx_buf->skb;
3100
3101 rx_buf->skb = NULL;
3102
3103 dma_addr = pci_unmap_addr(rx_buf, mapping);
3104
3105 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3106 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3107 PCI_DMA_FROMDEVICE);
3108
3109 rx_hdr = (struct l2_fhdr *) skb->data;
3110 len = rx_hdr->l2_fhdr_pkt_len;
3111 status = rx_hdr->l2_fhdr_status;
3112
3113 hdr_len = 0;
3114 if (status & L2_FHDR_STATUS_SPLIT) {
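/* For split frames, the header length is delivered in the ip_xsum
 * field of the frame header.
 */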
3115 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3116 pg_ring_used = 1;
3117 } else if (len > bp->rx_jumbo_thresh) {
3118 hdr_len = bp->rx_jumbo_thresh;
3119 pg_ring_used = 1;
3120 }
3121
3122 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3123 L2_FHDR_ERRORS_PHY_DECODE |
3124 L2_FHDR_ERRORS_ALIGNMENT |
3125 L2_FHDR_ERRORS_TOO_SHORT |
3126 L2_FHDR_ERRORS_GIANT_FRAME))) {
3127
3128 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3129 sw_ring_prod);
3130 if (pg_ring_used) {
3131 int pages;
3132
3133 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3134
3135 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3136 }
3137 goto next_rx;
3138 }
3139
3140 len -= 4;
3141
3142 if (len <= bp->rx_copy_thresh) {
3143 struct sk_buff *new_skb;
3144
3145 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3146 if (new_skb == NULL) {
3147 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3148 sw_ring_prod);
3149 goto next_rx;
3150 }
3151
3152 /* aligned copy: start 6 bytes before the frame and drop those
 * bytes with skb_reserve() below (presumably to keep the copy and
 * the IP header word-aligned).
 */
3153 skb_copy_from_linear_data_offset(skb,
3154 BNX2_RX_OFFSET - 6,
3155 new_skb->data, len + 6);
3156 skb_reserve(new_skb, 6);
3157 skb_put(new_skb, len);
3158
3159 bnx2_reuse_rx_skb(bp, rxr, skb,
3160 sw_ring_cons, sw_ring_prod);
3161
3162 skb = new_skb;
3163 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3164 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3165 goto next_rx;
3166
3167 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3168 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3169 vtag = rx_hdr->l2_fhdr_vlan_tag;
3170 #ifdef BCM_VLAN
3171 if (bp->vlgrp)
3172 hw_vlan = 1;
3173 else
3174 #endif
3175 {
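/* No VLAN group registered: re-insert the tag into the packet
 * data so the stack sees a normal 802.1Q frame.
 */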
3176 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3177 __skb_push(skb, 4);
3178
3179 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3180 ve->h_vlan_proto = htons(ETH_P_8021Q);
3181 ve->h_vlan_TCI = htons(vtag);
3182 len += 4;
3183 }
3184 }
3185
3186 skb->protocol = eth_type_trans(skb, bp->dev);
3187
3188 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3189 (ntohs(skb->protocol) != ETH_P_8021Q)) {
3190
3191 dev_kfree_skb(skb);
3192 goto next_rx;
3193
3194 }
3195
3196 skb->ip_summed = CHECKSUM_NONE;
3197 if (bp->rx_csum &&
3198 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3199 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3200
3201 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3202 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3203 skb->ip_summed = CHECKSUM_UNNECESSARY;
3204 }
3205
3206 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3207
3208 #ifdef BCM_VLAN
3209 if (hw_vlan)
3210 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
3211 else
3212 #endif
3213 netif_receive_skb(skb);
3214
3215 rx_pkt++;
3216
3217 next_rx:
3218 sw_cons = NEXT_RX_BD(sw_cons);
3219 sw_prod = NEXT_RX_BD(sw_prod);
3220
3221 if (rx_pkt == budget)
3222 break;
3223
3224 /* Refresh hw_cons to see if there is new work */
3225 if (sw_cons == hw_cons) {
3226 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3227 rmb();
3228 }
3229 }
3230 rxr->rx_cons = sw_cons;
3231 rxr->rx_prod = sw_prod;
3232
3233 if (pg_ring_used)
3234 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3235
3236 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3237
3238 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3239
3240 mmiowb();
3241
3242 return rx_pkt;
3243
3244 }
3245
3246 /* MSI ISR - The only difference between this and the INTx ISR
3247 * is that the MSI interrupt is always serviced.
3248 */
3249 static irqreturn_t
3250 bnx2_msi(int irq, void *dev_instance)
3251 {
3252 struct bnx2_napi *bnapi = dev_instance;
3253 struct bnx2 *bp = bnapi->bp;
3254
3255 prefetch(bnapi->status_blk.msi);
3256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3257 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3258 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3259
3260 /* Return here if interrupt is disabled. */
3261 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3262 return IRQ_HANDLED;
3263
3264 napi_schedule(&bnapi->napi);
3265
3266 return IRQ_HANDLED;
3267 }
3268
3269 static irqreturn_t
3270 bnx2_msi_1shot(int irq, void *dev_instance)
3271 {
3272 struct bnx2_napi *bnapi = dev_instance;
3273 struct bnx2 *bp = bnapi->bp;
3274
3275 prefetch(bnapi->status_blk.msi);
3276
3277 /* Return here if interrupt is disabled. */
3278 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3279 return IRQ_HANDLED;
3280
3281 napi_schedule(&bnapi->napi);
3282
3283 return IRQ_HANDLED;
3284 }
3285
3286 static irqreturn_t
3287 bnx2_interrupt(int irq, void *dev_instance)
3288 {
3289 struct bnx2_napi *bnapi = dev_instance;
3290 struct bnx2 *bp = bnapi->bp;
3291 struct status_block *sblk = bnapi->status_blk.msi;
3292
3293 /* When using INTx, it is possible for the interrupt to arrive
3294 * at the CPU before the status block posted prior to the
3295 * interrupt. Reading a register will flush the status block.
3296 * When using MSI, the MSI message will always complete after
3297 * the status block write.
3298 */
3299 if ((sblk->status_idx == bnapi->last_status_idx) &&
3300 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3301 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3302 return IRQ_NONE;
3303
3304 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3305 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3306 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3307
3308 /* Read back to deassert IRQ immediately to avoid too many
3309 * spurious interrupts.
3310 */
3311 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3312
3313 /* Return here if interrupt is shared and is disabled. */
3314 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3315 return IRQ_HANDLED;
3316
3317 if (napi_schedule_prep(&bnapi->napi)) {
3318 bnapi->last_status_idx = sblk->status_idx;
3319 __napi_schedule(&bnapi->napi);
3320 }
3321
3322 return IRQ_HANDLED;
3323 }
3324
3325 static inline int
3326 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3327 {
3328 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3329 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3330
3331 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3332 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3333 return 1;
3334 return 0;
3335 }
3336
3337 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3338 STATUS_ATTN_BITS_TIMER_ABORT)
3339
3340 static inline int
3341 bnx2_has_work(struct bnx2_napi *bnapi)
3342 {
3343 struct status_block *sblk = bnapi->status_blk.msi;
3344
3345 if (bnx2_has_fast_work(bnapi))
3346 return 1;
3347
3348 #ifdef BCM_CNIC
3349 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3350 return 1;
3351 #endif
3352
3353 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3354 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3355 return 1;
3356
3357 return 0;
3358 }
3359
3360 static void
3361 bnx2_chk_missed_msi(struct bnx2 *bp)
3362 {
3363 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3364 u32 msi_ctrl;
3365
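/* Workaround for missed MSIs: if work is pending but the status index
 * has not advanced since the last idle check, assume the MSI was lost,
 * toggle the MSI enable bit, and service the interrupt by hand.
 */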
3366 if (bnx2_has_work(bnapi)) {
3367 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3368 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3369 return;
3370
3371 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3372 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3373 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3374 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3375 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3376 }
3377 }
3378
3379 bp->idle_chk_status_idx = bnapi->last_status_idx;
3380 }
3381
3382 #ifdef BCM_CNIC
3383 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3384 {
3385 struct cnic_ops *c_ops;
3386
3387 if (!bnapi->cnic_present)
3388 return;
3389
3390 rcu_read_lock();
3391 c_ops = rcu_dereference(bp->cnic_ops);
3392 if (c_ops)
3393 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3394 bnapi->status_blk.msi);
3395 rcu_read_unlock();
3396 }
3397 #endif
3398
3399 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3400 {
3401 struct status_block *sblk = bnapi->status_blk.msi;
3402 u32 status_attn_bits = sblk->status_attn_bits;
3403 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3404
3405 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3406 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3407
3408 bnx2_phy_int(bp, bnapi);
3409
3410 /* This is needed to take care of transient status
3411 * during link changes.
3412 */
3413 REG_WR(bp, BNX2_HC_COMMAND,
3414 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3415 REG_RD(bp, BNX2_HC_COMMAND);
3416 }
3417 }
3418
3419 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3420 int work_done, int budget)
3421 {
3422 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3423 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3424
3425 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3426 bnx2_tx_int(bp, bnapi, 0);
3427
3428 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3429 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3430
3431 return work_done;
3432 }
3433
3434 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3435 {
3436 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3437 struct bnx2 *bp = bnapi->bp;
3438 int work_done = 0;
3439 struct status_block_msix *sblk = bnapi->status_blk.msix;
3440
3441 while (1) {
3442 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3443 if (unlikely(work_done >= budget))
3444 break;
3445
3446 bnapi->last_status_idx = sblk->status_idx;
3447 /* status idx must be read before checking for more work. */
3448 rmb();
3449 if (likely(!bnx2_has_fast_work(bnapi))) {
3450
3451 napi_complete(napi);
3452 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3453 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3454 bnapi->last_status_idx);
3455 break;
3456 }
3457 }
3458 return work_done;
3459 }
3460
3461 static int bnx2_poll(struct napi_struct *napi, int budget)
3462 {
3463 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3464 struct bnx2 *bp = bnapi->bp;
3465 int work_done = 0;
3466 struct status_block *sblk = bnapi->status_blk.msi;
3467
3468 while (1) {
3469 bnx2_poll_link(bp, bnapi);
3470
3471 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3472
3473 #ifdef BCM_CNIC
3474 bnx2_poll_cnic(bp, bnapi);
3475 #endif
3476
3477 /* bnapi->last_status_idx is used below to tell the hw how
3478 * much work has been processed, so we must read it before
3479 * checking for more work.
3480 */
3481 bnapi->last_status_idx = sblk->status_idx;
3482
3483 if (unlikely(work_done >= budget))
3484 break;
3485
3486 rmb();
3487 if (likely(!bnx2_has_work(bnapi))) {
3488 napi_complete(napi);
3489 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3490 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3491 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3492 bnapi->last_status_idx);
3493 break;
3494 }
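/* INTx: ack with the interrupt still masked, then unmask with a
 * second write so that a new status block update can assert the
 * line again.
 */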
3495 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3496 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3497 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3498 bnapi->last_status_idx);
3499
3500 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3501 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3502 bnapi->last_status_idx);
3503 break;
3504 }
3505 }
3506
3507 return work_done;
3508 }
3509
3510 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3511 * from set_multicast.
3512 */
3513 static void
3514 bnx2_set_rx_mode(struct net_device *dev)
3515 {
3516 struct bnx2 *bp = netdev_priv(dev);
3517 u32 rx_mode, sort_mode;
3518 struct netdev_hw_addr *ha;
3519 int i;
3520
3521 if (!netif_running(dev))
3522 return;
3523
3524 spin_lock_bh(&bp->phy_lock);
3525
3526 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3527 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3528 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3529 #ifdef BCM_VLAN
3530 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3531 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3532 #else
3533 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3534 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3535 #endif
3536 if (dev->flags & IFF_PROMISC) {
3537 /* Promiscuous mode. */
3538 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3539 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3540 BNX2_RPM_SORT_USER0_PROM_VLAN;
3541 }
3542 else if (dev->flags & IFF_ALLMULTI) {
3543 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3544 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3545 0xffffffff);
3546 }
3547 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3548 }
3549 else {
3550 /* Accept one or more multicast(s). */
3551 struct dev_mc_list *mclist;
3552 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3553 u32 regidx;
3554 u32 bit;
3555 u32 crc;
3556
3557 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3558
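/* Hash each address into a 256-bit filter: the low byte of the CRC
 * selects one of 256 bits spread across the 8 32-bit hash registers.
 */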
3559 netdev_for_each_mc_addr(mclist, dev) {
3560 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3561 bit = crc & 0xff;
3562 regidx = (bit & 0xe0) >> 5;
3563 bit &= 0x1f;
3564 mc_filter[regidx] |= (1 << bit);
3565 }
3566
3567 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3568 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3569 mc_filter[i]);
3570 }
3571
3572 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3573 }
3574
3575 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3576 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3577 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3578 BNX2_RPM_SORT_USER0_PROM_VLAN;
3579 } else if (!(dev->flags & IFF_PROMISC)) {
3580 /* Add all entries to the match filter list */
3581 i = 0;
3582 netdev_for_each_uc_addr(ha, dev) {
3583 bnx2_set_mac_addr(bp, ha->addr,
3584 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3585 sort_mode |= (1 <<
3586 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3587 i++;
3588 }
3589
3590 }
3591
3592 if (rx_mode != bp->rx_mode) {
3593 bp->rx_mode = rx_mode;
3594 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3595 }
3596
3597 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3598 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3599 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3600
3601 spin_unlock_bh(&bp->phy_lock);
3602 }
3603
3604 static int __devinit
3605 check_fw_section(const struct firmware *fw,
3606 const struct bnx2_fw_file_section *section,
3607 u32 alignment, bool non_empty)
3608 {
3609 u32 offset = be32_to_cpu(section->offset);
3610 u32 len = be32_to_cpu(section->len);
3611
3612 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3613 return -EINVAL;
3614 if ((non_empty && len == 0) || len > fw->size - offset ||
3615 len & (alignment - 1))
3616 return -EINVAL;
3617 return 0;
3618 }
3619
3620 static int __devinit
3621 check_mips_fw_entry(const struct firmware *fw,
3622 const struct bnx2_mips_fw_file_entry *entry)
3623 {
3624 if (check_fw_section(fw, &entry->text, 4, true) ||
3625 check_fw_section(fw, &entry->data, 4, false) ||
3626 check_fw_section(fw, &entry->rodata, 4, false))
3627 return -EINVAL;
3628 return 0;
3629 }
3630
3631 static int __devinit
3632 bnx2_request_firmware(struct bnx2 *bp)
3633 {
3634 const char *mips_fw_file, *rv2p_fw_file;
3635 const struct bnx2_mips_fw_file *mips_fw;
3636 const struct bnx2_rv2p_fw_file *rv2p_fw;
3637 int rc;
3638
3639 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3640 mips_fw_file = FW_MIPS_FILE_09;
3641 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3642 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3643 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3644 else
3645 rv2p_fw_file = FW_RV2P_FILE_09;
3646 } else {
3647 mips_fw_file = FW_MIPS_FILE_06;
3648 rv2p_fw_file = FW_RV2P_FILE_06;
3649 }
3650
3651 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3652 if (rc) {
3653 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3654 return rc;
3655 }
3656
3657 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3658 if (rc) {
3659 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3660 return rc;
3661 }
3662 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3663 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3664 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3667 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3668 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3669 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3670 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3671 return -EINVAL;
3672 }
3673 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3674 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3675 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3676 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3677 return -EINVAL;
3678 }
3679
3680 return 0;
3681 }
3682
3683 static u32
3684 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3685 {
3686 switch (idx) {
3687 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3688 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3689 rv2p_code |= RV2P_BD_PAGE_SIZE;
3690 break;
3691 }
3692 return rv2p_code;
3693 }
3694
3695 static int
3696 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3697 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3698 {
3699 u32 rv2p_code_len, file_offset;
3700 __be32 *rv2p_code;
3701 int i;
3702 u32 val, cmd, addr;
3703
3704 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3705 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3706
3707 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3708
3709 if (rv2p_proc == RV2P_PROC1) {
3710 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3711 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3712 } else {
3713 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3714 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3715 }
3716
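/* Each RV2P instruction is 64 bits: write the high and low words,
 * then commit them to instruction slot i/8 through the address/command
 * register.
 */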
3717 for (i = 0; i < rv2p_code_len; i += 8) {
3718 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3719 rv2p_code++;
3720 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3721 rv2p_code++;
3722
3723 val = (i / 8) | cmd;
3724 REG_WR(bp, addr, val);
3725 }
3726
3727 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
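/* Apply up to 8 firmware fixups, each patching one 64-bit instruction
 * in place (currently only the BD page size for PROC1).
 */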
3728 for (i = 0; i < 8; i++) {
3729 u32 loc, code;
3730
3731 loc = be32_to_cpu(fw_entry->fixup[i]);
3732 if (loc && ((loc * 4) < rv2p_code_len)) {
3733 code = be32_to_cpu(*(rv2p_code + loc - 1));
3734 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3735 code = be32_to_cpu(*(rv2p_code + loc));
3736 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3737 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3738
3739 val = (loc / 2) | cmd;
3740 REG_WR(bp, addr, val);
3741 }
3742 }
3743
3744 /* Reset the processor, un-stall is done later. */
3745 if (rv2p_proc == RV2P_PROC1) {
3746 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3747 }
3748 else {
3749 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3750 }
3751
3752 return 0;
3753 }
3754
3755 static int
3756 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3757 const struct bnx2_mips_fw_file_entry *fw_entry)
3758 {
3759 u32 addr, len, file_offset;
3760 __be32 *data;
3761 u32 offset;
3762 u32 val;
3763
3764 /* Halt the CPU. */
3765 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3766 val |= cpu_reg->mode_value_halt;
3767 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3768 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3769
3770 /* Load the Text area. */
3771 addr = be32_to_cpu(fw_entry->text.addr);
3772 len = be32_to_cpu(fw_entry->text.len);
3773 file_offset = be32_to_cpu(fw_entry->text.offset);
3774 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3775
3776 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3777 if (len) {
3778 int j;
3779
3780 for (j = 0; j < (len / 4); j++, offset += 4)
3781 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3782 }
3783
3784 /* Load the Data area. */
3785 addr = be32_to_cpu(fw_entry->data.addr);
3786 len = be32_to_cpu(fw_entry->data.len);
3787 file_offset = be32_to_cpu(fw_entry->data.offset);
3788 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3789
3790 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3791 if (len) {
3792 int j;
3793
3794 for (j = 0; j < (len / 4); j++, offset += 4)
3795 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3796 }
3797
3798 /* Load the Read-Only area. */
3799 addr = be32_to_cpu(fw_entry->rodata.addr);
3800 len = be32_to_cpu(fw_entry->rodata.len);
3801 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3802 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3803
3804 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3805 if (len) {
3806 int j;
3807
3808 for (j = 0; j < (len / 4); j++, offset += 4)
3809 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3810 }
3811
3812 /* Clear the pre-fetch instruction. */
3813 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3814
3815 val = be32_to_cpu(fw_entry->start_addr);
3816 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3817
3818 /* Start the CPU. */
3819 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3820 val &= ~cpu_reg->mode_value_halt;
3821 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3822 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3823
3824 return 0;
3825 }
3826
3827 static int
3828 bnx2_init_cpus(struct bnx2 *bp)
3829 {
3830 const struct bnx2_mips_fw_file *mips_fw =
3831 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3832 const struct bnx2_rv2p_fw_file *rv2p_fw =
3833 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3834 int rc;
3835
3836 /* Initialize the RV2P processor. */
3837 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3838 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3839
3840 /* Initialize the RX Processor. */
3841 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3842 if (rc)
3843 goto init_cpu_err;
3844
3845 /* Initialize the TX Processor. */
3846 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3847 if (rc)
3848 goto init_cpu_err;
3849
3850 /* Initialize the TX Patch-up Processor. */
3851 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3852 if (rc)
3853 goto init_cpu_err;
3854
3855 /* Initialize the Completion Processor. */
3856 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3857 if (rc)
3858 goto init_cpu_err;
3859
3860 /* Initialize the Command Processor. */
3861 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3862
3863 init_cpu_err:
3864 return rc;
3865 }
3866
3867 static int
3868 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3869 {
3870 u16 pmcsr;
3871
3872 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3873
3874 switch (state) {
3875 case PCI_D0: {
3876 u32 val;
3877
3878 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3879 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3880 PCI_PM_CTRL_PME_STATUS);
3881
3882 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3883 /* delay required during transition out of D3hot */
3884 msleep(20);
3885
3886 val = REG_RD(bp, BNX2_EMAC_MODE);
3887 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3888 val &= ~BNX2_EMAC_MODE_MPKT;
3889 REG_WR(bp, BNX2_EMAC_MODE, val);
3890
3891 val = REG_RD(bp, BNX2_RPM_CONFIG);
3892 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3893 REG_WR(bp, BNX2_RPM_CONFIG, val);
3894 break;
3895 }
3896 case PCI_D3hot: {
3897 int i;
3898 u32 val, wol_msg;
3899
3900 if (bp->wol) {
3901 u32 advertising;
3902 u8 autoneg;
3903
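/* Temporarily advertise only 10/100 on copper before suspending,
 * presumably to keep the link at a lower-power speed while waiting
 * for a wakeup packet; the user settings are restored afterwards.
 */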
3904 autoneg = bp->autoneg;
3905 advertising = bp->advertising;
3906
3907 if (bp->phy_port == PORT_TP) {
3908 bp->autoneg = AUTONEG_SPEED;
3909 bp->advertising = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full |
3911 ADVERTISED_100baseT_Half |
3912 ADVERTISED_100baseT_Full |
3913 ADVERTISED_Autoneg;
3914 }
3915
3916 spin_lock_bh(&bp->phy_lock);
3917 bnx2_setup_phy(bp, bp->phy_port);
3918 spin_unlock_bh(&bp->phy_lock);
3919
3920 bp->autoneg = autoneg;
3921 bp->advertising = advertising;
3922
3923 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3924
3925 val = REG_RD(bp, BNX2_EMAC_MODE);
3926
3927 /* Enable port mode. */
3928 val &= ~BNX2_EMAC_MODE_PORT;
3929 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3930 BNX2_EMAC_MODE_ACPI_RCVD |
3931 BNX2_EMAC_MODE_MPKT;
3932 if (bp->phy_port == PORT_TP)
3933 val |= BNX2_EMAC_MODE_PORT_MII;
3934 else {
3935 val |= BNX2_EMAC_MODE_PORT_GMII;
3936 if (bp->line_speed == SPEED_2500)
3937 val |= BNX2_EMAC_MODE_25G_MODE;
3938 }
3939
3940 REG_WR(bp, BNX2_EMAC_MODE, val);
3941
3942 /* receive all multicast */
3943 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3944 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3945 0xffffffff);
3946 }
3947 REG_WR(bp, BNX2_EMAC_RX_MODE,
3948 BNX2_EMAC_RX_MODE_SORT_MODE);
3949
3950 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3951 BNX2_RPM_SORT_USER0_MC_EN;
3952 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3953 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3954 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3955 BNX2_RPM_SORT_USER0_ENA);
3956
3957 /* Need to enable EMAC and RPM for WOL. */
3958 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3959 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3960 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3961 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3962
3963 val = REG_RD(bp, BNX2_RPM_CONFIG);
3964 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3965 REG_WR(bp, BNX2_RPM_CONFIG, val);
3966
3967 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3968 }
3969 else {
3970 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3971 }
3972
3973 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3974 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3975 1, 0);
3976
3977 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
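/* 3 is D3hot in the PowerState field of the PM control register. */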
3978 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3979 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3980
3981 if (bp->wol)
3982 pmcsr |= 3;
3983 }
3984 else {
3985 pmcsr |= 3;
3986 }
3987 if (bp->wol) {
3988 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3989 }
3990 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3991 pmcsr);
3992
3993 /* No more memory access after this point until
3994 * device is brought back to D0.
3995 */
3996 udelay(50);
3997 break;
3998 }
3999 default:
4000 return -EINVAL;
4001 }
4002 return 0;
4003 }
4004
4005 static int
4006 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4007 {
4008 u32 val;
4009 int j;
4010
4011 /* Request access to the flash interface. */
4012 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4013 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4014 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4015 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4016 break;
4017
4018 udelay(5);
4019 }
4020
4021 if (j >= NVRAM_TIMEOUT_COUNT)
4022 return -EBUSY;
4023
4024 return 0;
4025 }
4026
4027 static int
4028 bnx2_release_nvram_lock(struct bnx2 *bp)
4029 {
4030 int j;
4031 u32 val;
4032
4033 /* Relinquish nvram interface. */
4034 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4035
4036 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4037 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4038 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4039 break;
4040
4041 udelay(5);
4042 }
4043
4044 if (j >= NVRAM_TIMEOUT_COUNT)
4045 return -EBUSY;
4046
4047 return 0;
4048 }
4049
4050
4051 static int
4052 bnx2_enable_nvram_write(struct bnx2 *bp)
4053 {
4054 u32 val;
4055
4056 val = REG_RD(bp, BNX2_MISC_CFG);
4057 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4058
4059 if (bp->flash_info->flags & BNX2_NV_WREN) {
4060 int j;
4061
4062 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4063 REG_WR(bp, BNX2_NVM_COMMAND,
4064 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4065
4066 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4067 udelay(5);
4068
4069 val = REG_RD(bp, BNX2_NVM_COMMAND);
4070 if (val & BNX2_NVM_COMMAND_DONE)
4071 break;
4072 }
4073
4074 if (j >= NVRAM_TIMEOUT_COUNT)
4075 return -EBUSY;
4076 }
4077 return 0;
4078 }
4079
4080 static void
4081 bnx2_disable_nvram_write(struct bnx2 *bp)
4082 {
4083 u32 val;
4084
4085 val = REG_RD(bp, BNX2_MISC_CFG);
4086 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4087 }
4088
4089
4090 static void
4091 bnx2_enable_nvram_access(struct bnx2 *bp)
4092 {
4093 u32 val;
4094
4095 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4096 /* Enable both bits, even on read. */
4097 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4098 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4099 }
4100
4101 static void
4102 bnx2_disable_nvram_access(struct bnx2 *bp)
4103 {
4104 u32 val;
4105
4106 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4107 /* Disable both bits, even after read. */
4108 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4109 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4110 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4111 }
4112
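/* Erase the flash page containing @offset.  Buffered (EEPROM-style)
 * parts erase transparently on write, so this is a no-op for them.
 * For raw flash the sequence is: clear the DONE bit, latch the page
 * address, issue ERASE|WR|DOIT, then poll for DONE.
 */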
4113 static int
4114 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4115 {
4116 u32 cmd;
4117 int j;
4118
4119 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4120 /* Buffered flash, no erase needed */
4121 return 0;
4122
4123 /* Build an erase command */
4124 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4125 BNX2_NVM_COMMAND_DOIT;
4126
4127 /* Need to clear DONE bit separately. */
4128 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4129
4130 /* Address of the NVRAM page to erase. */
4131 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4132
4133 /* Issue an erase command. */
4134 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4135
4136 /* Wait for completion. */
4137 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4138 u32 val;
4139
4140 udelay(5);
4141
4142 val = REG_RD(bp, BNX2_NVM_COMMAND);
4143 if (val & BNX2_NVM_COMMAND_DONE)
4144 break;
4145 }
4146
4147 if (j >= NVRAM_TIMEOUT_COUNT)
4148 return -EBUSY;
4149
4150 return 0;
4151 }
4152
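/* Read one 32-bit word from NVRAM into @ret_val.  @cmd_flags carries
 * the FIRST/LAST framing bits for multi-word bursts.  On flashes that
 * need address translation, the linear offset is first converted to a
 * (page number << page_bits) + byte-within-page address.
 */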
4153 static int
4154 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4155 {
4156 u32 cmd;
4157 int j;
4158
4159 /* Build the command word. */
4160 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4161
4162 /* Translate the offset for a buffered flash; not needed for the 5709. */
4163 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4164 offset = ((offset / bp->flash_info->page_size) <<
4165 bp->flash_info->page_bits) +
4166 (offset % bp->flash_info->page_size);
4167 }
4168
4169 /* Need to clear DONE bit separately. */
4170 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4171
4172 /* Address of the NVRAM to read from. */
4173 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4174
4175 /* Issue a read command. */
4176 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4177
4178 /* Wait for completion. */
4179 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4180 u32 val;
4181
4182 udelay(5);
4183
4184 val = REG_RD(bp, BNX2_NVM_COMMAND);
4185 if (val & BNX2_NVM_COMMAND_DONE) {
4186 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4187 memcpy(ret_val, &v, 4);
4188 break;
4189 }
4190 }
4191 if (j >= NVRAM_TIMEOUT_COUNT)
4192 return -EBUSY;
4193
4194 return 0;
4195 }
4196
4197
4198 static int
4199 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4200 {
4201 u32 cmd;
4202 __be32 val32;
4203 int j;
4204
4205 /* Build the command word. */
4206 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4207
4208 /* Translate the offset for a buffered flash; not needed for the 5709. */
4209 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4210 offset = ((offset / bp->flash_info->page_size) <<
4211 bp->flash_info->page_bits) +
4212 (offset % bp->flash_info->page_size);
4213 }
4214
4215 /* Need to clear DONE bit separately. */
4216 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4217
4218 memcpy(&val32, val, 4);
4219
4220 /* Write the data. */
4221 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4222
4223 /* Address of the NVRAM to write to. */
4224 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4225
4226 /* Issue the write command. */
4227 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4228
4229 /* Wait for completion. */
4230 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4231 udelay(5);
4232
4233 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4234 break;
4235 }
4236 if (j >= NVRAM_TIMEOUT_COUNT)
4237 return -EBUSY;
4238
4239 return 0;
4240 }
4241
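/* Identify the attached flash/EEPROM part.  The 5709 has a fixed,
 * firmware-managed flash.  Older chips are matched against
 * flash_table[] using the strapping bits read from NVM_CFG1; if the
 * interface has not been reconfigured yet, the matching entry's
 * config registers are programmed under the NVRAM lock.
 */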
4242 static int
4243 bnx2_init_nvram(struct bnx2 *bp)
4244 {
4245 u32 val;
4246 int j, entry_count, rc = 0;
4247 const struct flash_spec *flash;
4248
4249 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4250 bp->flash_info = &flash_5709;
4251 goto get_flash_size;
4252 }
4253
4254 /* Determine the selected interface. */
4255 val = REG_RD(bp, BNX2_NVM_CFG1);
4256
4257 entry_count = ARRAY_SIZE(flash_table);
4258
4259 if (val & 0x40000000) {
4260
4261 /* Flash interface has been reconfigured */
4262 for (j = 0, flash = &flash_table[0]; j < entry_count;
4263 j++, flash++) {
4264 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4265 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4266 bp->flash_info = flash;
4267 break;
4268 }
4269 }
4270 }
4271 else {
4272 u32 mask;
4273 /* Not yet reconfigured */
4274
4275 if (val & (1 << 23))
4276 mask = FLASH_BACKUP_STRAP_MASK;
4277 else
4278 mask = FLASH_STRAP_MASK;
4279
4280 for (j = 0, flash = &flash_table[0]; j < entry_count;
4281 j++, flash++) {
4282
4283 if ((val & mask) == (flash->strapping & mask)) {
4284 bp->flash_info = flash;
4285
4286 /* Request access to the flash interface. */
4287 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4288 return rc;
4289
4290 /* Enable access to flash interface */
4291 bnx2_enable_nvram_access(bp);
4292
4293 /* Reconfigure the flash interface */
4294 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4295 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4296 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4297 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4298
4299 /* Disable access to flash interface */
4300 bnx2_disable_nvram_access(bp);
4301 bnx2_release_nvram_lock(bp);
4302
4303 break;
4304 }
4305 }
4306 } /* if (val & 0x40000000) */
4307
4308 if (j == entry_count) {
4309 bp->flash_info = NULL;
4310 pr_alert("Unknown flash/EEPROM type\n");
4311 return -ENODEV;
4312 }
4313
4314 get_flash_size:
4315 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4316 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4317 if (val)
4318 bp->flash_size = val;
4319 else
4320 bp->flash_size = bp->flash_info->total_size;
4321
4322 return rc;
4323 }
4324
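/* Read an arbitrary byte range from NVRAM.  The hardware only moves
 * aligned 32-bit words, so a misaligned start or length is handled by
 * reading whole dwords into a scratch buffer and copying out just the
 * requested bytes, with FIRST/LAST flags bracketing the burst.
 */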
4325 static int
4326 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4327 int buf_size)
4328 {
4329 int rc = 0;
4330 u32 cmd_flags, offset32, len32, extra;
4331
4332 if (buf_size == 0)
4333 return 0;
4334
4335 /* Request access to the flash interface. */
4336 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4337 return rc;
4338
4339 /* Enable access to flash interface */
4340 bnx2_enable_nvram_access(bp);
4341
4342 len32 = buf_size;
4343 offset32 = offset;
4344 extra = 0;
4345
4346 cmd_flags = 0;
4347
4348 if (offset32 & 3) {
4349 u8 buf[4];
4350 u32 pre_len;
4351
4352 offset32 &= ~3;
4353 pre_len = 4 - (offset & 3);
4354
4355 if (pre_len >= len32) {
4356 pre_len = len32;
4357 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4358 BNX2_NVM_COMMAND_LAST;
4359 }
4360 else {
4361 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4362 }
4363
4364 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4365
4366 if (rc)
4367 return rc;
4368
4369 memcpy(ret_buf, buf + (offset & 3), pre_len);
4370
4371 offset32 += 4;
4372 ret_buf += pre_len;
4373 len32 -= pre_len;
4374 }
4375 if (len32 & 3) {
4376 extra = 4 - (len32 & 3);
4377 len32 = (len32 + 4) & ~3;
4378 }
4379
4380 if (len32 == 4) {
4381 u8 buf[4];
4382
4383 if (cmd_flags)
4384 cmd_flags = BNX2_NVM_COMMAND_LAST;
4385 else
4386 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4387 BNX2_NVM_COMMAND_LAST;
4388
4389 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4390
4391 memcpy(ret_buf, buf, 4 - extra);
4392 }
4393 else if (len32 > 0) {
4394 u8 buf[4];
4395
4396 /* Read the first word. */
4397 if (cmd_flags)
4398 cmd_flags = 0;
4399 else
4400 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4401
4402 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4403
4404 /* Advance to the next dword. */
4405 offset32 += 4;
4406 ret_buf += 4;
4407 len32 -= 4;
4408
4409 while (len32 > 4 && rc == 0) {
4410 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4411
4412 /* Advance to the next dword. */
4413 offset32 += 4;
4414 ret_buf += 4;
4415 len32 -= 4;
4416 }
4417
4418 if (rc)
4419 return rc;
4420
4421 cmd_flags = BNX2_NVM_COMMAND_LAST;
4422 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4423
4424 memcpy(ret_buf, buf, 4 - extra);
4425 }
4426
4427 /* Disable access to flash interface */
4428 bnx2_disable_nvram_access(bp);
4429
4430 bnx2_release_nvram_lock(bp);
4431
4432 return rc;
4433 }
4434
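/* Write an arbitrary byte range to NVRAM.  Misaligned edges are first
 * read back so that only whole dwords are written.  For raw flash,
 * each affected page is read into a scratch buffer, erased, and then
 * rewritten with the old and new data merged; buffered parts are
 * written directly.
 */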
4435 static int
4436 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4437 int buf_size)
4438 {
4439 u32 written, offset32, len32;
4440 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4441 int rc = 0;
4442 int align_start, align_end;
4443
4444 buf = data_buf;
4445 offset32 = offset;
4446 len32 = buf_size;
4447 align_start = align_end = 0;
4448
4449 if ((align_start = (offset32 & 3))) {
4450 offset32 &= ~3;
4451 len32 += align_start;
4452 if (len32 < 4)
4453 len32 = 4;
4454 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4455 return rc;
4456 }
4457
4458 if (len32 & 3) {
4459 align_end = 4 - (len32 & 3);
4460 len32 += align_end;
4461 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4462 return rc;
4463 }
4464
4465 if (align_start || align_end) {
4466 align_buf = kmalloc(len32, GFP_KERNEL);
4467 if (align_buf == NULL)
4468 return -ENOMEM;
4469 if (align_start) {
4470 memcpy(align_buf, start, 4);
4471 }
4472 if (align_end) {
4473 memcpy(align_buf + len32 - 4, end, 4);
4474 }
4475 memcpy(align_buf + align_start, data_buf, buf_size);
4476 buf = align_buf;
4477 }
4478
4479 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4480 flash_buffer = kmalloc(264, GFP_KERNEL);
4481 if (flash_buffer == NULL) {
4482 rc = -ENOMEM;
4483 goto nvram_write_end;
4484 }
4485 }
4486
4487 written = 0;
4488 while ((written < len32) && (rc == 0)) {
4489 u32 page_start, page_end, data_start, data_end;
4490 u32 addr, cmd_flags;
4491 int i;
4492
4493 /* Find the page_start addr */
4494 page_start = offset32 + written;
4495 page_start -= (page_start % bp->flash_info->page_size);
4496 /* Find the page_end addr */
4497 page_end = page_start + bp->flash_info->page_size;
4498 /* Find the data_start addr */
4499 data_start = (written == 0) ? offset32 : page_start;
4500 /* Find the data_end addr */
4501 data_end = (page_end > offset32 + len32) ?
4502 (offset32 + len32) : page_end;
4503
4504 /* Request access to the flash interface. */
4505 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4506 goto nvram_write_end;
4507
4508 /* Enable access to flash interface */
4509 bnx2_enable_nvram_access(bp);
4510
4511 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4512 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4513 int j;
4514
4515 /* Read the whole page into the buffer
4516 * (non-buffered flash only) */
4517 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4518 if (j == (bp->flash_info->page_size - 4)) {
4519 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4520 }
4521 rc = bnx2_nvram_read_dword(bp,
4522 page_start + j,
4523 &flash_buffer[j],
4524 cmd_flags);
4525
4526 if (rc)
4527 goto nvram_write_end;
4528
4529 cmd_flags = 0;
4530 }
4531 }
4532
4533 /* Enable writes to flash interface (unlock write-protect) */
4534 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4535 goto nvram_write_end;
4536
4537 /* Loop to write back the buffer data from page_start to
4538 * data_start */
4539 i = 0;
4540 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4541 /* Erase the page */
4542 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4543 goto nvram_write_end;
4544
4545 /* Re-enable the write again for the actual write */
4546 bnx2_enable_nvram_write(bp);
4547
4548 for (addr = page_start; addr < data_start;
4549 addr += 4, i += 4) {
4550
4551 rc = bnx2_nvram_write_dword(bp, addr,
4552 &flash_buffer[i], cmd_flags);
4553
4554 if (rc != 0)
4555 goto nvram_write_end;
4556
4557 cmd_flags = 0;
4558 }
4559 }
4560
4561 /* Loop to write the new data from data_start to data_end */
4562 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4563 if ((addr == page_end - 4) ||
4564 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4565 (addr == data_end - 4))) {
4566
4567 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4568 }
4569 rc = bnx2_nvram_write_dword(bp, addr, buf,
4570 cmd_flags);
4571
4572 if (rc != 0)
4573 goto nvram_write_end;
4574
4575 cmd_flags = 0;
4576 buf += 4;
4577 }
4578
4579 /* Loop to write back the buffer data from data_end
4580 * to page_end */
4581 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4582 for (addr = data_end; addr < page_end;
4583 addr += 4, i += 4) {
4584
4585 if (addr == page_end-4) {
4586 cmd_flags = BNX2_NVM_COMMAND_LAST;
4587 }
4588 rc = bnx2_nvram_write_dword(bp, addr,
4589 &flash_buffer[i], cmd_flags);
4590
4591 if (rc != 0)
4592 goto nvram_write_end;
4593
4594 cmd_flags = 0;
4595 }
4596 }
4597
4598 /* Disable writes to flash interface (lock write-protect) */
4599 bnx2_disable_nvram_write(bp);
4600
4601 /* Disable access to flash interface */
4602 bnx2_disable_nvram_access(bp);
4603 bnx2_release_nvram_lock(bp);
4604
4605 /* Increment written */
4606 written += data_end - data_start;
4607 }
4608
4609 nvram_write_end:
4610 kfree(flash_buffer);
4611 kfree(align_buf);
4612 return rc;
4613 }
4614
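/* Read the firmware capability mailbox to learn whether VLAN tags can
 * be kept across resets and whether the link is managed by a remote
 * PHY (SerDes parts only), then acknowledge the capabilities the
 * driver will actually use.
 */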
4615 static void
4616 bnx2_init_fw_cap(struct bnx2 *bp)
4617 {
4618 u32 val, sig = 0;
4619
4620 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4621 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4622
4623 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4624 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4625
4626 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4627 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4628 return;
4629
4630 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4631 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4632 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4633 }
4634
4635 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4636 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4637 u32 link;
4638
4639 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4640
4641 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4642 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4643 bp->phy_port = PORT_FIBRE;
4644 else
4645 bp->phy_port = PORT_TP;
4646
4647 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4648 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4649 }
4650
4651 if (netif_running(bp->dev) && sig)
4652 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4653 }
4654
4655 static void
4656 bnx2_setup_msix_tbl(struct bnx2 *bp)
4657 {
4658 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4659
4660 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4661 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4662 }
4663
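/* Soft-reset the chip.  DMA and host coalescing are quiesced first,
 * and the firmware is handshaked before and after the reset.  The
 * 5709 resets through MISC_COMMAND; older chips reset through
 * PCICFG_MISC_CONFIG, with an extra delay on 5706 A0/A1 where a read
 * during reset would hang the bus.  PCI_SWAP_DIAG0 reading 0x01020304
 * confirms the byte swapper came up correctly.
 */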
4664 static int
4665 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4666 {
4667 u32 val;
4668 int i, rc = 0;
4669 u8 old_port;
4670
4671 /* Wait for the current PCI transaction to complete before
4672 * issuing a reset. */
4673 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4674 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4675 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4676 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4677 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4678 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4679 udelay(5);
4680
4681 /* Wait for the firmware to tell us it is ok to issue a reset. */
4682 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4683
4684 /* Deposit a driver reset signature so the firmware knows that
4685 * this is a soft reset. */
4686 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4687 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4688
4689 /* Do a dummy read to force the chip to complete all current transactions
4690 * before we issue a reset. */
4691 val = REG_RD(bp, BNX2_MISC_ID);
4692
4693 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4694 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4695 REG_RD(bp, BNX2_MISC_COMMAND);
4696 udelay(5);
4697
4698 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4699 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4700
4701 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4702
4703 } else {
4704 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4705 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4706 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4707
4708 /* Chip reset. */
4709 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4710
4711 /* Reading back any register after chip reset will hang the
4712 * bus on 5706 A0 and A1. The msleep below provides plenty
4713 * of margin for write posting.
4714 */
4715 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4716 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4717 msleep(20);
4718
4719 /* Reset takes approximately 30 usec */
4720 for (i = 0; i < 10; i++) {
4721 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4722 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4723 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4724 break;
4725 udelay(10);
4726 }
4727
4728 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4729 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4730 pr_err("Chip reset did not complete\n");
4731 return -EBUSY;
4732 }
4733 }
4734
4735 /* Make sure byte swapping is properly configured. */
4736 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4737 if (val != 0x01020304) {
4738 pr_err("Chip not in correct endian mode\n");
4739 return -ENODEV;
4740 }
4741
4742 /* Wait for the firmware to finish its initialization. */
4743 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4744 if (rc)
4745 return rc;
4746
4747 spin_lock_bh(&bp->phy_lock);
4748 old_port = bp->phy_port;
4749 bnx2_init_fw_cap(bp);
4750 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4751 old_port != bp->phy_port)
4752 bnx2_set_default_remote_link(bp);
4753 spin_unlock_bh(&bp->phy_lock);
4754
4755 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4756 /* Adjust the voltage regulator two steps lower. The default
4757 * of this register is 0x0000000e. */
4758 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4759
4760 /* Remove bad rbuf memory from the free pool. */
4761 rc = bnx2_alloc_bad_rbuf(bp);
4762 }
4763
4764 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4765 bnx2_setup_msix_tbl(bp);
4766 /* Prevent MSIX table reads and writes from timing out */
4767 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4768 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4769 }
4770
4771 return rc;
4772 }
4773
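/* Bring the chip from reset to an operational state: program DMA byte
 * swapping, load the on-chip CPUs' firmware, set the MAC address and
 * MTU, point the chip at the status and statistics blocks, and
 * program the host coalescing thresholds for every vector before
 * enabling the blocks and doing the final firmware handshake.
 */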
4774 static int
4775 bnx2_init_chip(struct bnx2 *bp)
4776 {
4777 u32 val, mtu;
4778 int rc, i;
4779
4780 /* Make sure the interrupt is not active. */
4781 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4782
4783 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4784 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4785 #ifdef __BIG_ENDIAN
4786 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4787 #endif
4788 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4789 DMA_READ_CHANS << 12 |
4790 DMA_WRITE_CHANS << 16;
4791
4792 val |= (0x2 << 20) | (1 << 11);
4793
4794 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4795 val |= (1 << 23);
4796
4797 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4798 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4799 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4800
4801 REG_WR(bp, BNX2_DMA_CONFIG, val);
4802
4803 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4804 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4805 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4806 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4807 }
4808
4809 if (bp->flags & BNX2_FLAG_PCIX) {
4810 u16 val16;
4811
4812 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4813 &val16);
4814 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4815 val16 & ~PCI_X_CMD_ERO);
4816 }
4817
4818 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4819 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4820 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4821 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4822
4823 /* Initialize context mapping and zero out the quick contexts. The
4824 * context block must have already been enabled. */
4825 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4826 rc = bnx2_init_5709_context(bp);
4827 if (rc)
4828 return rc;
4829 } else
4830 bnx2_init_context(bp);
4831
4832 if ((rc = bnx2_init_cpus(bp)) != 0)
4833 return rc;
4834
4835 bnx2_init_nvram(bp);
4836
4837 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4838
4839 val = REG_RD(bp, BNX2_MQ_CONFIG);
4840 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4841 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4842 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4843 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4844 if (CHIP_REV(bp) == CHIP_REV_Ax)
4845 val |= BNX2_MQ_CONFIG_HALT_DIS;
4846 }
4847
4848 REG_WR(bp, BNX2_MQ_CONFIG, val);
4849
4850 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4851 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4852 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4853
4854 val = (BCM_PAGE_BITS - 8) << 24;
4855 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4856
4857 /* Configure page size. */
4858 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4859 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4860 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4861 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4862
4863 val = bp->mac_addr[0] +
4864 (bp->mac_addr[1] << 8) +
4865 (bp->mac_addr[2] << 16) +
4866 bp->mac_addr[3] +
4867 (bp->mac_addr[4] << 8) +
4868 (bp->mac_addr[5] << 16);
4869 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4870
4871 /* Program the MTU. Also include 4 bytes for CRC32. */
4872 mtu = bp->dev->mtu;
4873 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4874 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4875 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4876 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4877
4878 if (mtu < 1500)
4879 mtu = 1500;
4880
4881 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4882 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4883 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4884
4885 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4886 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4887 bp->bnx2_napi[i].last_status_idx = 0;
4888
4889 bp->idle_chk_status_idx = 0xffff;
4890
4891 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4892
4893 /* Set up how to generate a link change interrupt. */
4894 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4895
4896 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4897 (u64) bp->status_blk_mapping & 0xffffffff);
4898 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4899
4900 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4901 (u64) bp->stats_blk_mapping & 0xffffffff);
4902 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4903 (u64) bp->stats_blk_mapping >> 32);
4904
4905 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4906 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4907
4908 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4909 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4910
4911 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4912 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4913
4914 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4915
4916 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4917
4918 REG_WR(bp, BNX2_HC_COM_TICKS,
4919 (bp->com_ticks_int << 16) | bp->com_ticks);
4920
4921 REG_WR(bp, BNX2_HC_CMD_TICKS,
4922 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4923
4924 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4925 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4926 else
4927 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4928 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4929
4930 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4931 val = BNX2_HC_CONFIG_COLLECT_STATS;
4932 else {
4933 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4934 BNX2_HC_CONFIG_COLLECT_STATS;
4935 }
4936
4937 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4938 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4939 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4940
4941 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4942 }
4943
4944 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4945 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4946
4947 REG_WR(bp, BNX2_HC_CONFIG, val);
4948
4949 for (i = 1; i < bp->irq_nvecs; i++) {
4950 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4951 BNX2_HC_SB_CONFIG_1;
4952
4953 REG_WR(bp, base,
4954 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4955 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4956 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4957
4958 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4959 (bp->tx_quick_cons_trip_int << 16) |
4960 bp->tx_quick_cons_trip);
4961
4962 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4963 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4964
4965 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4966 (bp->rx_quick_cons_trip_int << 16) |
4967 bp->rx_quick_cons_trip);
4968
4969 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4970 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4971 }
4972
4973 /* Clear internal stats counters. */
4974 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4975
4976 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4977
4978 /* Initialize the receive filter. */
4979 bnx2_set_rx_mode(bp->dev);
4980
4981 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4982 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4983 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4984 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4985 }
4986 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4987 1, 0);
4988
4989 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4990 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4991
4992 udelay(20);
4993
4994 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4995
4996 return rc;
4997 }
4998
4999 static void
5000 bnx2_clear_ring_states(struct bnx2 *bp)
5001 {
5002 struct bnx2_napi *bnapi;
5003 struct bnx2_tx_ring_info *txr;
5004 struct bnx2_rx_ring_info *rxr;
5005 int i;
5006
5007 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5008 bnapi = &bp->bnx2_napi[i];
5009 txr = &bnapi->tx_ring;
5010 rxr = &bnapi->rx_ring;
5011
5012 txr->tx_cons = 0;
5013 txr->hw_tx_cons = 0;
5014 rxr->rx_prod_bseq = 0;
5015 rxr->rx_prod = 0;
5016 rxr->rx_cons = 0;
5017 rxr->rx_pg_prod = 0;
5018 rxr->rx_pg_cons = 0;
5019 }
5020 }
5021
5022 static void
5023 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5024 {
5025 u32 val, offset0, offset1, offset2, offset3;
5026 u32 cid_addr = GET_CID_ADDR(cid);
5027
5028 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5029 offset0 = BNX2_L2CTX_TYPE_XI;
5030 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5031 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5032 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5033 } else {
5034 offset0 = BNX2_L2CTX_TYPE;
5035 offset1 = BNX2_L2CTX_CMD_TYPE;
5036 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5037 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5038 }
5039 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5040 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5041
5042 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5043 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5044
5045 val = (u64) txr->tx_desc_mapping >> 32;
5046 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5047
5048 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5049 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5050 }
5051
5052 static void
5053 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5054 {
5055 struct tx_bd *txbd;
5056 u32 cid = TX_CID;
5057 struct bnx2_napi *bnapi;
5058 struct bnx2_tx_ring_info *txr;
5059
5060 bnapi = &bp->bnx2_napi[ring_num];
5061 txr = &bnapi->tx_ring;
5062
5063 if (ring_num == 0)
5064 cid = TX_CID;
5065 else
5066 cid = TX_TSS_CID + ring_num - 1;
5067
5068 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5069
5070 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5071
5072 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5073 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5074
5075 txr->tx_prod = 0;
5076 txr->tx_prod_bseq = 0;
5077
5078 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5079 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5080
5081 bnx2_init_tx_context(bp, cid, txr);
5082 }
5083
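/* Initialize one or more pages of rx buffer descriptors as a chained
 * ring: every BD gets the buffer size and START|END flags, and the
 * final BD of each page carries the DMA address of the next page,
 * with the last page pointing back to the first.
 */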
5084 static void
5085 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5086 int num_rings)
5087 {
5088 int i;
5089 struct rx_bd *rxbd;
5090
5091 for (i = 0; i < num_rings; i++) {
5092 int j;
5093
5094 rxbd = &rx_ring[i][0];
5095 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5096 rxbd->rx_bd_len = buf_size;
5097 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5098 }
5099 if (i == (num_rings - 1))
5100 j = 0;
5101 else
5102 j = i + 1;
5103 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5104 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5105 }
5106 }
5107
5108 static void
5109 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5110 {
5111 int i;
5112 u16 prod, ring_prod;
5113 u32 cid, rx_cid_addr, val;
5114 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5115 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5116
5117 if (ring_num == 0)
5118 cid = RX_CID;
5119 else
5120 cid = RX_RSS_CID + ring_num - 1;
5121
5122 rx_cid_addr = GET_CID_ADDR(cid);
5123
5124 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5125 bp->rx_buf_use_size, bp->rx_max_ring);
5126
5127 bnx2_init_rx_context(bp, cid);
5128
5129 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5130 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5131 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5132 }
5133
5134 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5135 if (bp->rx_pg_ring_size) {
5136 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5137 rxr->rx_pg_desc_mapping,
5138 PAGE_SIZE, bp->rx_max_pg_ring);
5139 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5140 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5141 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5142 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5143
5144 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5145 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5146
5147 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5148 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5149
5150 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5151 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5152 }
5153
5154 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5155 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5156
5157 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5158 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5159
5160 ring_prod = prod = rxr->rx_pg_prod;
5161 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5162 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5163 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5164 ring_num, i, bp->rx_pg_ring_size);
5165 break;
5166 }
5167 prod = NEXT_RX_BD(prod);
5168 ring_prod = RX_PG_RING_IDX(prod);
5169 }
5170 rxr->rx_pg_prod = prod;
5171
5172 ring_prod = prod = rxr->rx_prod;
5173 for (i = 0; i < bp->rx_ring_size; i++) {
5174 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5175 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5176 ring_num, i, bp->rx_ring_size);
5177 break;
5178 }
5179 prod = NEXT_RX_BD(prod);
5180 ring_prod = RX_RING_IDX(prod);
5181 }
5182 rxr->rx_prod = prod;
5183
5184 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5185 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5186 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5187
5188 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5189 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5190
5191 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5192 }
5193
5194 static void
5195 bnx2_init_all_rings(struct bnx2 *bp)
5196 {
5197 int i;
5198 u32 val;
5199
5200 bnx2_clear_ring_states(bp);
5201
5202 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5203 for (i = 0; i < bp->num_tx_rings; i++)
5204 bnx2_init_tx_ring(bp, i);
5205
5206 if (bp->num_tx_rings > 1)
5207 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5208 (TX_TSS_CID << 7));
5209
5210 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5211 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5212
5213 for (i = 0; i < bp->num_rx_rings; i++)
5214 bnx2_init_rx_ring(bp, i);
5215
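	/* With more than one rx ring, build the RSS indirection table in
	 * RXP scratch memory: entries are packed four per 32-bit word and
	 * cycle through the num_rx_rings - 1 RSS rings (ring 0 stays
	 * reserved for non-RSS traffic), then IPv4/IPv6 RSS is enabled.
	 */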
5216 if (bp->num_rx_rings > 1) {
5217 u32 tbl_32;
5218 u8 *tbl = (u8 *) &tbl_32;
5219
5220 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5221 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5222
5223 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5224 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5225 if ((i % 4) == 3)
5226 bnx2_reg_wr_ind(bp,
5227 BNX2_RXP_SCRATCH_RSS_TBL + i,
5228 cpu_to_be32(tbl_32));
5229 }
5230
5231 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5232 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5233
5234 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5235
5236 }
5237 }
5238
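/* Compute how many BD pages are needed for @ring_size descriptors and
 * round that up to a power of two, capped at @max_size.  For example,
 * assuming MAX_RX_DESC_CNT is 255, a ring_size of 600 needs 3 pages
 * and this returns 4.
 */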
5239 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5240 {
5241 u32 max, num_rings = 1;
5242
5243 while (ring_size > MAX_RX_DESC_CNT) {
5244 ring_size -= MAX_RX_DESC_CNT;
5245 num_rings++;
5246 }
5247 /* round num_rings up to the next power of 2 */
5248 max = max_size;
5249 while ((max & num_rings) == 0)
5250 max >>= 1;
5251
5252 if (num_rings != max)
5253 max <<= 1;
5254
5255 return max;
5256 }
5257
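/* Size the rx rings for the current MTU.  When a full buffer plus skb
 * overhead no longer fits in one page, frames are split: a small
 * header buffer on the normal ring plus page-sized buffers on the
 * separate rx page ring, and copy-break is disabled.
 */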
5258 static void
5259 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5260 {
5261 u32 rx_size, rx_space, jumbo_size;
5262
5263 /* 8 for CRC and VLAN */
5264 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5265
5266 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5267 sizeof(struct skb_shared_info);
5268
5269 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5270 bp->rx_pg_ring_size = 0;
5271 bp->rx_max_pg_ring = 0;
5272 bp->rx_max_pg_ring_idx = 0;
5273 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5274 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5275
5276 jumbo_size = size * pages;
5277 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5278 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5279
5280 bp->rx_pg_ring_size = jumbo_size;
5281 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5282 MAX_RX_PG_RINGS);
5283 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5284 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5285 bp->rx_copy_thresh = 0;
5286 }
5287
5288 bp->rx_buf_use_size = rx_size;
5289 /* hw alignment */
5290 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5291 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5292 bp->rx_ring_size = size;
5293 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5294 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5295 }
5296
5297 static void
5298 bnx2_free_tx_skbs(struct bnx2 *bp)
5299 {
5300 int i;
5301
5302 for (i = 0; i < bp->num_tx_rings; i++) {
5303 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5304 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5305 int j;
5306
5307 if (txr->tx_buf_ring == NULL)
5308 continue;
5309
5310 for (j = 0; j < TX_DESC_CNT; ) {
5311 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5312 struct sk_buff *skb = tx_buf->skb;
5313 int k, last;
5314
5315 if (skb == NULL) {
5316 j++;
5317 continue;
5318 }
5319
5320 pci_unmap_single(bp->pdev,
5321 pci_unmap_addr(tx_buf, mapping),
5322 skb_headlen(skb),
5323 PCI_DMA_TODEVICE);
5324
5325 tx_buf->skb = NULL;
5326
5327 last = tx_buf->nr_frags;
5328 j++;
5329 for (k = 0; k < last; k++, j++) {
5330 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5331 pci_unmap_page(bp->pdev,
5332 pci_unmap_addr(tx_buf, mapping),
5333 skb_shinfo(skb)->frags[k].size,
5334 PCI_DMA_TODEVICE);
5335 }
5336 dev_kfree_skb(skb);
5337 }
5338 }
5339 }
5340
5341 static void
5342 bnx2_free_rx_skbs(struct bnx2 *bp)
5343 {
5344 int i;
5345
5346 for (i = 0; i < bp->num_rx_rings; i++) {
5347 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5348 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5349 int j;
5350
5351 if (rxr->rx_buf_ring == NULL)
5352 return;
5353
5354 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5355 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5356 struct sk_buff *skb = rx_buf->skb;
5357
5358 if (skb == NULL)
5359 continue;
5360
5361 pci_unmap_single(bp->pdev,
5362 pci_unmap_addr(rx_buf, mapping),
5363 bp->rx_buf_use_size,
5364 PCI_DMA_FROMDEVICE);
5365
5366 rx_buf->skb = NULL;
5367
5368 dev_kfree_skb(skb);
5369 }
5370 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5371 bnx2_free_rx_page(bp, rxr, j);
5372 }
5373 }
5374
5375 static void
5376 bnx2_free_skbs(struct bnx2 *bp)
5377 {
5378 bnx2_free_tx_skbs(bp);
5379 bnx2_free_rx_skbs(bp);
5380 }
5381
5382 static int
5383 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5384 {
5385 int rc;
5386
5387 rc = bnx2_reset_chip(bp, reset_code);
5388 bnx2_free_skbs(bp);
5389 if (rc)
5390 return rc;
5391
5392 if ((rc = bnx2_init_chip(bp)) != 0)
5393 return rc;
5394
5395 bnx2_init_all_rings(bp);
5396 return 0;
5397 }
5398
5399 static int
5400 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5401 {
5402 int rc;
5403
5404 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5405 return rc;
5406
5407 spin_lock_bh(&bp->phy_lock);
5408 bnx2_init_phy(bp, reset_phy);
5409 bnx2_set_link(bp);
5410 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5411 bnx2_remote_phy_event(bp);
5412 spin_unlock_bh(&bp->phy_lock);
5413 return 0;
5414 }
5415
5416 static int
5417 bnx2_shutdown_chip(struct bnx2 *bp)
5418 {
5419 u32 reset_code;
5420
5421 if (bp->flags & BNX2_FLAG_NO_WOL)
5422 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5423 else if (bp->wol)
5424 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5425 else
5426 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5427
5428 return bnx2_reset_chip(bp, reset_code);
5429 }
5430
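/* Register self-test.  For each table entry, bits in rw_mask must
 * read back both as written 0s and as written 1s, while bits in
 * ro_mask must keep their original value across both writes; the
 * saved value is restored afterwards either way.
 */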
5431 static int
5432 bnx2_test_registers(struct bnx2 *bp)
5433 {
5434 int ret;
5435 int i, is_5709;
5436 static const struct {
5437 u16 offset;
5438 u16 flags;
5439 #define BNX2_FL_NOT_5709 1
5440 u32 rw_mask;
5441 u32 ro_mask;
5442 } reg_tbl[] = {
5443 { 0x006c, 0, 0x00000000, 0x0000003f },
5444 { 0x0090, 0, 0xffffffff, 0x00000000 },
5445 { 0x0094, 0, 0x00000000, 0x00000000 },
5446
5447 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5448 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5449 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5450 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5451 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5452 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5453 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5454 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5455 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5456
5457 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5458 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5459 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5460 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5461 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5462 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5463
5464 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5465 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5466 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5467
5468 { 0x1000, 0, 0x00000000, 0x00000001 },
5469 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5470
5471 { 0x1408, 0, 0x01c00800, 0x00000000 },
5472 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5473 { 0x14a8, 0, 0x00000000, 0x000001ff },
5474 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5475 { 0x14b0, 0, 0x00000002, 0x00000001 },
5476 { 0x14b8, 0, 0x00000000, 0x00000000 },
5477 { 0x14c0, 0, 0x00000000, 0x00000009 },
5478 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5479 { 0x14cc, 0, 0x00000000, 0x00000001 },
5480 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5481
5482 { 0x1800, 0, 0x00000000, 0x00000001 },
5483 { 0x1804, 0, 0x00000000, 0x00000003 },
5484
5485 { 0x2800, 0, 0x00000000, 0x00000001 },
5486 { 0x2804, 0, 0x00000000, 0x00003f01 },
5487 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5488 { 0x2810, 0, 0xffff0000, 0x00000000 },
5489 { 0x2814, 0, 0xffff0000, 0x00000000 },
5490 { 0x2818, 0, 0xffff0000, 0x00000000 },
5491 { 0x281c, 0, 0xffff0000, 0x00000000 },
5492 { 0x2834, 0, 0xffffffff, 0x00000000 },
5493 { 0x2840, 0, 0x00000000, 0xffffffff },
5494 { 0x2844, 0, 0x00000000, 0xffffffff },
5495 { 0x2848, 0, 0xffffffff, 0x00000000 },
5496 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5497
5498 { 0x2c00, 0, 0x00000000, 0x00000011 },
5499 { 0x2c04, 0, 0x00000000, 0x00030007 },
5500
5501 { 0x3c00, 0, 0x00000000, 0x00000001 },
5502 { 0x3c04, 0, 0x00000000, 0x00070000 },
5503 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5504 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5505 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5506 { 0x3c14, 0, 0x00000000, 0xffffffff },
5507 { 0x3c18, 0, 0x00000000, 0xffffffff },
5508 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5509 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5510
5511 { 0x5004, 0, 0x00000000, 0x0000007f },
5512 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5513
5514 { 0x5c00, 0, 0x00000000, 0x00000001 },
5515 { 0x5c04, 0, 0x00000000, 0x0003000f },
5516 { 0x5c08, 0, 0x00000003, 0x00000000 },
5517 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5518 { 0x5c10, 0, 0x00000000, 0xffffffff },
5519 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5520 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5521 { 0x5c88, 0, 0x00000000, 0x00077373 },
5522 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5523
5524 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5525 { 0x680c, 0, 0xffffffff, 0x00000000 },
5526 { 0x6810, 0, 0xffffffff, 0x00000000 },
5527 { 0x6814, 0, 0xffffffff, 0x00000000 },
5528 { 0x6818, 0, 0xffffffff, 0x00000000 },
5529 { 0x681c, 0, 0xffffffff, 0x00000000 },
5530 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5531 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5532 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5533 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5534 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5535 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5536 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5537 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5538 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5539 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5540 { 0x684c, 0, 0xffffffff, 0x00000000 },
5541 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5542 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5543 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5544 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5545 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5546 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5547
5548 { 0xffff, 0, 0x00000000, 0x00000000 },
5549 };
5550
5551 ret = 0;
5552 is_5709 = 0;
5553 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5554 is_5709 = 1;
5555
5556 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5557 u32 offset, rw_mask, ro_mask, save_val, val;
5558 u16 flags = reg_tbl[i].flags;
5559
5560 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5561 continue;
5562
5563 offset = (u32) reg_tbl[i].offset;
5564 rw_mask = reg_tbl[i].rw_mask;
5565 ro_mask = reg_tbl[i].ro_mask;
5566
5567 save_val = readl(bp->regview + offset);
5568
5569 writel(0, bp->regview + offset);
5570
5571 val = readl(bp->regview + offset);
5572 if ((val & rw_mask) != 0) {
5573 goto reg_test_err;
5574 }
5575
5576 if ((val & ro_mask) != (save_val & ro_mask)) {
5577 goto reg_test_err;
5578 }
5579
5580 writel(0xffffffff, bp->regview + offset);
5581
5582 val = readl(bp->regview + offset);
5583 if ((val & rw_mask) != rw_mask) {
5584 goto reg_test_err;
5585 }
5586
5587 if ((val & ro_mask) != (save_val & ro_mask)) {
5588 goto reg_test_err;
5589 }
5590
5591 writel(save_val, bp->regview + offset);
5592 continue;
5593
5594 reg_test_err:
5595 writel(save_val, bp->regview + offset);
5596 ret = -ENODEV;
5597 break;
5598 }
5599 return ret;
5600 }
5601
5602 static int
5603 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5604 {
5605 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5606 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5607 int i;
5608
5609 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5610 u32 offset;
5611
5612 for (offset = 0; offset < size; offset += 4) {
5613
5614 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5615
5616 if (bnx2_reg_rd_ind(bp, start + offset) !=
5617 test_pattern[i]) {
5618 return -ENODEV;
5619 }
5620 }
5621 }
5622 return 0;
5623 }
5624
5625 static int
5626 bnx2_test_memory(struct bnx2 *bp)
5627 {
5628 int ret = 0;
5629 int i;
5630 static struct mem_entry {
5631 u32 offset;
5632 u32 len;
5633 } mem_tbl_5706[] = {
5634 { 0x60000, 0x4000 },
5635 { 0xa0000, 0x3000 },
5636 { 0xe0000, 0x4000 },
5637 { 0x120000, 0x4000 },
5638 { 0x1a0000, 0x4000 },
5639 { 0x160000, 0x4000 },
5640 { 0xffffffff, 0 },
5641 },
5642 mem_tbl_5709[] = {
5643 { 0x60000, 0x4000 },
5644 { 0xa0000, 0x3000 },
5645 { 0xe0000, 0x4000 },
5646 { 0x120000, 0x4000 },
5647 { 0x1a0000, 0x4000 },
5648 { 0xffffffff, 0 },
5649 };
5650 struct mem_entry *mem_tbl;
5651
5652 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5653 mem_tbl = mem_tbl_5709;
5654 else
5655 mem_tbl = mem_tbl_5706;
5656
5657 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5658 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5659 mem_tbl[i].len)) != 0) {
5660 return ret;
5661 }
5662 }
5663
5664 return ret;
5665 }
5666
5667 #define BNX2_MAC_LOOPBACK 0
5668 #define BNX2_PHY_LOOPBACK 1
5669
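/* Run one MAC- or PHY-level loopback frame through the chip: build a
 * test packet addressed to ourselves, post it on tx ring 0, kick the
 * coalescing block so the completion is written back immediately, and
 * verify that exactly one frame arrived with no l2_fhdr errors, the
 * right length, and an intact payload.
 */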
5670 static int
5671 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5672 {
5673 unsigned int pkt_size, num_pkts, i;
5674 struct sk_buff *skb, *rx_skb;
5675 unsigned char *packet;
5676 u16 rx_start_idx, rx_idx;
5677 dma_addr_t map;
5678 struct tx_bd *txbd;
5679 struct sw_bd *rx_buf;
5680 struct l2_fhdr *rx_hdr;
5681 int ret = -ENODEV;
5682 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5683 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5684 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5685
5686 tx_napi = bnapi;
5687
5688 txr = &tx_napi->tx_ring;
5689 rxr = &bnapi->rx_ring;
5690 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5691 bp->loopback = MAC_LOOPBACK;
5692 bnx2_set_mac_loopback(bp);
5693 }
5694 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5695 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5696 return 0;
5697
5698 bp->loopback = PHY_LOOPBACK;
5699 bnx2_set_phy_loopback(bp);
5700 }
5701 else
5702 return -EINVAL;
5703
5704 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5705 skb = netdev_alloc_skb(bp->dev, pkt_size);
5706 if (!skb)
5707 return -ENOMEM;
5708 packet = skb_put(skb, pkt_size);
5709 memcpy(packet, bp->dev->dev_addr, 6);
5710 memset(packet + 6, 0x0, 8);
5711 for (i = 14; i < pkt_size; i++)
5712 packet[i] = (unsigned char) (i & 0xff);
5713
5714 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5715 PCI_DMA_TODEVICE);
5716 if (pci_dma_mapping_error(bp->pdev, map)) {
5717 dev_kfree_skb(skb);
5718 return -EIO;
5719 }
5720
5721 REG_WR(bp, BNX2_HC_COMMAND,
5722 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5723
5724 REG_RD(bp, BNX2_HC_COMMAND);
5725
5726 udelay(5);
5727 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5728
5729 num_pkts = 0;
5730
5731 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5732
5733 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5734 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5735 txbd->tx_bd_mss_nbytes = pkt_size;
5736 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5737
5738 num_pkts++;
5739 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5740 txr->tx_prod_bseq += pkt_size;
5741
5742 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5743 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5744
5745 udelay(100);
5746
5747 REG_WR(bp, BNX2_HC_COMMAND,
5748 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5749
5750 REG_RD(bp, BNX2_HC_COMMAND);
5751
5752 udelay(5);
5753
5754 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5755 dev_kfree_skb(skb);
5756
5757 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5758 goto loopback_test_done;
5759
5760 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5761 if (rx_idx != rx_start_idx + num_pkts) {
5762 goto loopback_test_done;
5763 }
5764
5765 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5766 rx_skb = rx_buf->skb;
5767
5768 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5769 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5770
5771 pci_dma_sync_single_for_cpu(bp->pdev,
5772 pci_unmap_addr(rx_buf, mapping),
5773 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5774
5775 if (rx_hdr->l2_fhdr_status &
5776 (L2_FHDR_ERRORS_BAD_CRC |
5777 L2_FHDR_ERRORS_PHY_DECODE |
5778 L2_FHDR_ERRORS_ALIGNMENT |
5779 L2_FHDR_ERRORS_TOO_SHORT |
5780 L2_FHDR_ERRORS_GIANT_FRAME)) {
5781
5782 goto loopback_test_done;
5783 }
5784
5785 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5786 goto loopback_test_done;
5787 }
5788
5789 for (i = 14; i < pkt_size; i++) {
5790 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5791 goto loopback_test_done;
5792 }
5793 }
5794
5795 ret = 0;
5796
5797 loopback_test_done:
5798 bp->loopback = 0;
5799 return ret;
5800 }
5801
5802 #define BNX2_MAC_LOOPBACK_FAILED 1
5803 #define BNX2_PHY_LOOPBACK_FAILED 2
5804 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5805 BNX2_PHY_LOOPBACK_FAILED)
5806
5807 static int
5808 bnx2_test_loopback(struct bnx2 *bp)
5809 {
5810 int rc = 0;
5811
5812 if (!netif_running(bp->dev))
5813 return BNX2_LOOPBACK_FAILED;
5814
5815 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5816 spin_lock_bh(&bp->phy_lock);
5817 bnx2_init_phy(bp, 1);
5818 spin_unlock_bh(&bp->phy_lock);
5819 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5820 rc |= BNX2_MAC_LOOPBACK_FAILED;
5821 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5822 rc |= BNX2_PHY_LOOPBACK_FAILED;
5823 return rc;
5824 }
5825
5826 #define NVRAM_SIZE 0x200
5827 #define CRC32_RESIDUAL 0xdebb20e3
5828
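/* NVRAM self-test: the first word must contain the 0x669955aa magic,
 * and each of the two 0x100-byte blocks at offset 0x100, checksummed
 * over its data and stored CRC together, must yield the well-known
 * CRC32 residual 0xdebb20e3.
 */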
5829 static int
5830 bnx2_test_nvram(struct bnx2 *bp)
5831 {
5832 __be32 buf[NVRAM_SIZE / 4];
5833 u8 *data = (u8 *) buf;
5834 int rc = 0;
5835 u32 magic, csum;
5836
5837 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5838 goto test_nvram_done;
5839
5840 magic = be32_to_cpu(buf[0]);
5841 if (magic != 0x669955aa) {
5842 rc = -ENODEV;
5843 goto test_nvram_done;
5844 }
5845
5846 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5847 goto test_nvram_done;
5848
5849 csum = ether_crc_le(0x100, data);
5850 if (csum != CRC32_RESIDUAL) {
5851 rc = -ENODEV;
5852 goto test_nvram_done;
5853 }
5854
5855 csum = ether_crc_le(0x100, data + 0x100);
5856 if (csum != CRC32_RESIDUAL) {
5857 rc = -ENODEV;
5858 }
5859
5860 test_nvram_done:
5861 return rc;
5862 }
5863
5864 static int
5865 bnx2_test_link(struct bnx2 *bp)
5866 {
5867 u32 bmsr;
5868
5869 if (!netif_running(bp->dev))
5870 return -ENODEV;
5871
5872 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5873 if (bp->link_up)
5874 return 0;
5875 return -ENODEV;
5876 }
5877 spin_lock_bh(&bp->phy_lock);
5878 bnx2_enable_bmsr1(bp);
5879 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5880 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5881 bnx2_disable_bmsr1(bp);
5882 spin_unlock_bh(&bp->phy_lock);
5883
5884 if (bmsr & BMSR_LSTATUS) {
5885 return 0;
5886 }
5887 return -ENODEV;
5888 }
5889
5890 static int
5891 bnx2_test_intr(struct bnx2 *bp)
5892 {
5893 int i;
5894 u16 status_idx;
5895
5896 if (!netif_running(bp->dev))
5897 return -ENODEV;
5898
5899 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5900
5901 /* This register is not touched during run-time. */
5902 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5903 REG_RD(bp, BNX2_HC_COMMAND);
5904
5905 for (i = 0; i < 10; i++) {
5906 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5907 status_idx) {
5908
5909 break;
5910 }
5911
5912 msleep_interruptible(10);
5913 }
5914 if (i < 10)
5915 return 0;
5916
5917 return -ENODEV;
5918 }
5919
5920 /* Determine link for parallel detection. */
5921 static int
5922 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5923 {
5924 u32 mode_ctl, an_dbg, exp;
5925
5926 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5927 return 0;
5928
5929 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5931
5932 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5933 return 0;
5934
5935 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5936 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5937 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5938
5939 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5940 return 0;
5941
5942 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5943 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5944 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5945
5946 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5947 return 0;
5948
5949 return 1;
5950 }
5951
5952 static void
5953 bnx2_5706_serdes_timer(struct bnx2 *bp)
5954 {
5955 int check_link = 1;
5956
5957 spin_lock(&bp->phy_lock);
5958 if (bp->serdes_an_pending) {
5959 bp->serdes_an_pending--;
5960 check_link = 0;
5961 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5962 u32 bmcr;
5963
5964 bp->current_interval = BNX2_TIMER_INTERVAL;
5965
5966 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5967
5968 if (bmcr & BMCR_ANENABLE) {
5969 if (bnx2_5706_serdes_has_link(bp)) {
5970 bmcr &= ~BMCR_ANENABLE;
5971 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5972 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5973 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5974 }
5975 }
5976 }
5977 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5978 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5979 u32 phy2;
5980
5981 bnx2_write_phy(bp, 0x17, 0x0f01);
5982 bnx2_read_phy(bp, 0x15, &phy2);
5983 if (phy2 & 0x20) {
5984 u32 bmcr;
5985
5986 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5987 bmcr |= BMCR_ANENABLE;
5988 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5989
5990 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5991 }
5992 } else
5993 bp->current_interval = BNX2_TIMER_INTERVAL;
5994
5995 if (check_link) {
5996 u32 val;
5997
5998 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5999 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6000 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6001
6002 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6003 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6004 bnx2_5706s_force_link_dn(bp, 1);
6005 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6006 } else
6007 bnx2_set_link(bp);
6008 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6009 bnx2_set_link(bp);
6010 }
6011 spin_unlock(&bp->phy_lock);
6012 }
6013
6014 static void
6015 bnx2_5708_serdes_timer(struct bnx2 *bp)
6016 {
6017 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6018 return;
6019
6020 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6021 bp->serdes_an_pending = 0;
6022 return;
6023 }
6024
6025 spin_lock(&bp->phy_lock);
6026 if (bp->serdes_an_pending)
6027 bp->serdes_an_pending--;
6028 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6029 u32 bmcr;
6030
6031 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6032 if (bmcr & BMCR_ANENABLE) {
6033 bnx2_enable_forced_2g5(bp);
6034 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6035 } else {
6036 bnx2_disable_forced_2g5(bp);
6037 bp->serdes_an_pending = 2;
6038 bp->current_interval = BNX2_TIMER_INTERVAL;
6039 }
6040
6041 } else
6042 bp->current_interval = BNX2_TIMER_INTERVAL;
6043
6044 spin_unlock(&bp->phy_lock);
6045 }
6046
6047 static void
6048 bnx2_timer(unsigned long data)
6049 {
6050 struct bnx2 *bp = (struct bnx2 *) data;
6051
6052 if (!netif_running(bp->dev))
6053 return;
6054
6055 if (atomic_read(&bp->intr_sem) != 0)
6056 goto bnx2_restart_timer;
6057
6058 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6059 BNX2_FLAG_USING_MSI)
6060 bnx2_chk_missed_msi(bp);
6061
6062 bnx2_send_heart_beat(bp);
6063
6064 bp->stats_blk->stat_FwRxDrop =
6065 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6066
6067 /* work around occasionally corrupted counters */
6068 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6069 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6070 BNX2_HC_COMMAND_STATS_NOW);
6071
6072 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6073 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6074 bnx2_5706_serdes_timer(bp);
6075 else
6076 bnx2_5708_serdes_timer(bp);
6077 }
6078
6079 bnx2_restart_timer:
6080 mod_timer(&bp->timer, jiffies + bp->current_interval);
6081 }
6082
6083 static int
6084 bnx2_request_irq(struct bnx2 *bp)
6085 {
6086 unsigned long flags;
6087 struct bnx2_irq *irq;
6088 int rc = 0, i;
6089
6090 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6091 flags = 0;
6092 else
6093 flags = IRQF_SHARED;
6094
6095 for (i = 0; i < bp->irq_nvecs; i++) {
6096 irq = &bp->irq_tbl[i];
6097 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6098 &bp->bnx2_napi[i]);
6099 if (rc)
6100 break;
6101 irq->requested = 1;
6102 }
6103 return rc;
6104 }
6105
6106 static void
6107 bnx2_free_irq(struct bnx2 *bp)
6108 {
6109 struct bnx2_irq *irq;
6110 int i;
6111
6112 for (i = 0; i < bp->irq_nvecs; i++) {
6113 irq = &bp->irq_tbl[i];
6114 if (irq->requested)
6115 free_irq(irq->vector, &bp->bnx2_napi[i]);
6116 irq->requested = 0;
6117 }
6118 if (bp->flags & BNX2_FLAG_USING_MSI)
6119 pci_disable_msi(bp->pdev);
6120 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6121 pci_disable_msix(bp->pdev);
6122
6123 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6124 }
6125
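/* Program the MSI-X table and PBA windows, then try to enable all
 * BNX2_MAX_MSIX_VEC vectors at once.  On success, record msix_vecs
 * vectors and point them at the one-shot handler; on failure, leave
 * the current interrupt mode untouched.
 */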
6126 static void
6127 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6128 {
6129 int i, rc;
6130 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6131 struct net_device *dev = bp->dev;
6132 const int len = sizeof(bp->irq_tbl[0].name);
6133
6134 bnx2_setup_msix_tbl(bp);
6135 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6136 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6137 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6138
6139 /* Flush the previous three writes to ensure MSI-X
6140 * is set up properly */
6141 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6142
6143 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6144 msix_ent[i].entry = i;
6145 msix_ent[i].vector = 0;
6146 }
6147
6148 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6149 if (rc != 0)
6150 return;
6151
6152 bp->irq_nvecs = msix_vecs;
6153 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6154 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6155 bp->irq_tbl[i].vector = msix_ent[i].vector;
6156 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6157 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6158 }
6159 }
6160
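/* Choose the interrupt mode.  Default to INTx on the PCI irq, then
 * try MSI-X (roughly one vector per CPU, capped by RX_MAX_RINGS) and
 * fall back to plain MSI where supported.  The tx/rx ring counts are
 * derived from the vectors actually obtained.
 */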
6161 static void
6162 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6163 {
6164 int cpus = num_online_cpus();
6165 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6166
6167 bp->irq_tbl[0].handler = bnx2_interrupt;
6168 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6169 bp->irq_nvecs = 1;
6170 bp->irq_tbl[0].vector = bp->pdev->irq;
6171
6172 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6173 bnx2_enable_msix(bp, msix_vecs);
6174
6175 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6176 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6177 if (pci_enable_msi(bp->pdev) == 0) {
6178 bp->flags |= BNX2_FLAG_USING_MSI;
6179 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6180 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6181 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6182 } else
6183 bp->irq_tbl[0].handler = bnx2_msi;
6184
6185 bp->irq_tbl[0].vector = bp->pdev->irq;
6186 }
6187 }
6188
6189 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6190 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6191
6192 bp->num_rx_rings = bp->irq_nvecs;
6193 }
6194
6195 /* Called with rtnl_lock */
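/* Bring the interface up: allocate rings and IRQs, initialize the
 * NIC, then verify that MSI actually fires before committing to it
 * (some chipsets fail to deliver MSI; see the fallback below).
 */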
6196 static int
6197 bnx2_open(struct net_device *dev)
6198 {
6199 struct bnx2 *bp = netdev_priv(dev);
6200 int rc;
6201
6202 netif_carrier_off(dev);
6203
6204 bnx2_set_power_state(bp, PCI_D0);
6205 bnx2_disable_int(bp);
6206
6207 bnx2_setup_int_mode(bp, disable_msi);
6208 bnx2_init_napi(bp);
6209 bnx2_napi_enable(bp);
6210 rc = bnx2_alloc_mem(bp);
6211 if (rc)
6212 goto open_err;
6213
6214 rc = bnx2_request_irq(bp);
6215 if (rc)
6216 goto open_err;
6217
6218 rc = bnx2_init_nic(bp, 1);
6219 if (rc)
6220 goto open_err;
6221
6222 mod_timer(&bp->timer, jiffies + bp->current_interval);
6223
6224 atomic_set(&bp->intr_sem, 0);
6225
6226 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6227
6228 bnx2_enable_int(bp);
6229
6230 if (bp->flags & BNX2_FLAG_USING_MSI) {
6231 /* Test MSI to make sure it is working.
6232 * If the MSI test fails, fall back to INTx mode.
6233 */
6234 if (bnx2_test_intr(bp) != 0) {
6235 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6236
6237 bnx2_disable_int(bp);
6238 bnx2_free_irq(bp);
6239
6240 bnx2_setup_int_mode(bp, 1);
6241
6242 rc = bnx2_init_nic(bp, 0);
6243
6244 if (!rc)
6245 rc = bnx2_request_irq(bp);
6246
6247 if (rc) {
6248 del_timer_sync(&bp->timer);
6249 goto open_err;
6250 }
6251 bnx2_enable_int(bp);
6252 }
6253 }
6254 if (bp->flags & BNX2_FLAG_USING_MSI)
6255 netdev_info(dev, "using MSI\n");
6256 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6257 netdev_info(dev, "using MSIX\n");
6258
6259 netif_tx_start_all_queues(dev);
6260
6261 return 0;
6262
6263 open_err:
6264 bnx2_napi_disable(bp);
6265 bnx2_free_skbs(bp);
6266 bnx2_free_irq(bp);
6267 bnx2_free_mem(bp);
6268 return rc;
6269 }
6270
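/* Deferred reset handler scheduled from bnx2_tx_timeout(): stops the
 * interface, re-initializes the chip, and restarts it under rtnl_lock
 * so it cannot race with open/close.
 */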
6271 static void
6272 bnx2_reset_task(struct work_struct *work)
6273 {
6274 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6275
6276 rtnl_lock();
6277 if (!netif_running(bp->dev)) {
6278 rtnl_unlock();
6279 return;
6280 }
6281
6282 bnx2_netif_stop(bp, true);
6283
6284 bnx2_init_nic(bp, 1);
6285
6286 atomic_set(&bp->intr_sem, 1);
6287 bnx2_netif_start(bp, true);
6288 rtnl_unlock();
6289 }
6290
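/* Log a snapshot of key chip registers to help diagnose tx timeouts. */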
6291 static void
6292 bnx2_dump_state(struct bnx2 *bp)
6293 {
6294 struct net_device *dev = bp->dev;
6295
6296 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6297 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
6298 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6299 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6300 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6301 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
6302 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6303 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6304 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6305 if (bp->flags & BNX2_FLAG_USING_MSIX)
6306 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6307 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6308 }
6309
6310 static void
6311 bnx2_tx_timeout(struct net_device *dev)
6312 {
6313 struct bnx2 *bp = netdev_priv(dev);
6314
6315 bnx2_dump_state(bp);
6316
6317 /* This allows the netif to be shut down gracefully before resetting */
6318 schedule_work(&bp->reset_task);
6319 }
6320
6321 #ifdef BCM_VLAN
6322 /* Called with rtnl_lock */
6323 static void
6324 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6325 {
6326 struct bnx2 *bp = netdev_priv(dev);
6327
6328 if (netif_running(dev))
6329 bnx2_netif_stop(bp, false);
6330
6331 bp->vlgrp = vlgrp;
6332
6333 if (!netif_running(dev))
6334 return;
6335
6336 bnx2_set_rx_mode(dev);
6337 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6338 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6339
6340 bnx2_netif_start(bp, false);
6341 }
6342 #endif
6343
6344 /* Called with netif_tx_lock.
6345 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6346 * netif_wake_queue().
6347 */
6348 static netdev_tx_t
6349 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6350 {
6351 struct bnx2 *bp = netdev_priv(dev);
6352 dma_addr_t mapping;
6353 struct tx_bd *txbd;
6354 struct sw_tx_bd *tx_buf;
6355 u32 len, vlan_tag_flags, last_frag, mss;
6356 u16 prod, ring_prod;
6357 int i;
6358 struct bnx2_napi *bnapi;
6359 struct bnx2_tx_ring_info *txr;
6360 struct netdev_queue *txq;
6361
6362 /* Determine which tx ring this packet will be placed on */
6363 i = skb_get_queue_mapping(skb);
6364 bnapi = &bp->bnx2_napi[i];
6365 txr = &bnapi->tx_ring;
6366 txq = netdev_get_tx_queue(dev, i);
6367
6368 if (unlikely(bnx2_tx_avail(bp, txr) <
6369 (skb_shinfo(skb)->nr_frags + 1))) {
6370 netif_tx_stop_queue(txq);
6371 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6372
6373 return NETDEV_TX_BUSY;
6374 }
6375 len = skb_headlen(skb);
6376 prod = txr->tx_prod;
6377 ring_prod = TX_RING_IDX(prod);
6378
6379 vlan_tag_flags = 0;
6380 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6381 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6382 }
6383
6384 #ifdef BCM_VLAN
6385 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6386 vlan_tag_flags |=
6387 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6388 }
6389 #endif
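/* LSO: mark the BD for software LSO and encode the TCP option length
 * (and, for TCPv6, any non-standard transport header offset) into the
 * BD flags and mss fields.
 */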
6390 if ((mss = skb_shinfo(skb)->gso_size)) {
6391 u32 tcp_opt_len;
6392 struct iphdr *iph;
6393
6394 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6395
6396 tcp_opt_len = tcp_optlen(skb);
6397
6398 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6399 u32 tcp_off = skb_transport_offset(skb) -
6400 sizeof(struct ipv6hdr) - ETH_HLEN;
6401
6402 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6403 TX_BD_FLAGS_SW_FLAGS;
6404 if (likely(tcp_off == 0))
6405 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6406 else {
6407 tcp_off >>= 3;
6408 vlan_tag_flags |= ((tcp_off & 0x3) <<
6409 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6410 ((tcp_off & 0x10) <<
6411 TX_BD_FLAGS_TCP6_OFF4_SHL);
6412 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6413 }
6414 } else {
6415 iph = ip_hdr(skb);
6416 if (tcp_opt_len || (iph->ihl > 5)) {
6417 vlan_tag_flags |= ((iph->ihl - 5) +
6418 (tcp_opt_len >> 2)) << 8;
6419 }
6420 }
6421 } else
6422 mss = 0;
6423
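/* Map the linear part of the skb.  On failure, drop the packet and
 * return NETDEV_TX_OK so the stack does not retry it.
 */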
6424 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6425 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6426 dev_kfree_skb(skb);
6427 return NETDEV_TX_OK;
6428 }
6429
6430 tx_buf = &txr->tx_buf_ring[ring_prod];
6431 tx_buf->skb = skb;
6432 pci_unmap_addr_set(tx_buf, mapping, mapping);
6433
6434 txbd = &txr->tx_desc_ring[ring_prod];
6435
6436 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6437 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6438 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6439 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6440
6441 last_frag = skb_shinfo(skb)->nr_frags;
6442 tx_buf->nr_frags = last_frag;
6443 tx_buf->is_gso = skb_is_gso(skb);
6444
6445 for (i = 0; i < last_frag; i++) {
6446 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6447
6448 prod = NEXT_TX_BD(prod);
6449 ring_prod = TX_RING_IDX(prod);
6450 txbd = &txr->tx_desc_ring[ring_prod];
6451
6452 len = frag->size;
6453 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6454 len, PCI_DMA_TODEVICE);
6455 if (pci_dma_mapping_error(bp->pdev, mapping))
6456 goto dma_error;
6457 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6458 mapping);
6459
6460 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6461 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6462 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6463 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6464
6465 }
6466 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6467
6468 prod = NEXT_TX_BD(prod);
6469 txr->tx_prod_bseq += skb->len;
6470
6471 REG_WR16(bp, txr->tx_bidx_addr, prod);
6472 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6473
6474 mmiowb();
6475
6476 txr->tx_prod = prod;
6477
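/* Stop the queue if a maximally fragmented skb might not fit, then
 * re-check: a concurrent bnx2_tx_int() may have freed descriptors
 * between the check and the stop, in which case wake the queue again.
 */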
6478 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6479 netif_tx_stop_queue(txq);
6480 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6481 netif_tx_wake_queue(txq);
6482 }
6483
6484 return NETDEV_TX_OK;
6485 dma_error:
6486 /* Save the index of the fragment that failed to map */
6487 last_frag = i;
6488
6489 /* Start back at the beginning and unmap the skb head */
6490 prod = txr->tx_prod;
6491 ring_prod = TX_RING_IDX(prod);
6492 tx_buf = &txr->tx_buf_ring[ring_prod];
6493 tx_buf->skb = NULL;
6494 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6495 skb_headlen(skb), PCI_DMA_TODEVICE);
6496
6497 /* unmap remaining mapped pages */
6498 for (i = 0; i < last_frag; i++) {
6499 prod = NEXT_TX_BD(prod);
6500 ring_prod = TX_RING_IDX(prod);
6501 tx_buf = &txr->tx_buf_ring[ring_prod];
6502 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6503 skb_shinfo(skb)->frags[i].size,
6504 PCI_DMA_TODEVICE);
6505 }
6506
6507 dev_kfree_skb(skb);
6508 return NETDEV_TX_OK;
6509 }
6510
6511 /* Called with rtnl_lock */
6512 static int
6513 bnx2_close(struct net_device *dev)
6514 {
6515 struct bnx2 *bp = netdev_priv(dev);
6516
6517 cancel_work_sync(&bp->reset_task);
6518
6519 bnx2_disable_int_sync(bp);
6520 bnx2_napi_disable(bp);
6521 del_timer_sync(&bp->timer);
6522 bnx2_shutdown_chip(bp);
6523 bnx2_free_irq(bp);
6524 bnx2_free_skbs(bp);
6525 bnx2_free_mem(bp);
6526 bp->link_up = 0;
6527 netif_carrier_off(bp->dev);
6528 bnx2_set_power_state(bp, PCI_D3hot);
6529 return 0;
6530 }
6531
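/* Fold the current hardware statistics block into temp_stats_blk.
 * Called before a chip reset wipes the hardware counters so that
 * cumulative totals survive (see bnx2_change_ring_size()).
 */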
6532 static void
6533 bnx2_save_stats(struct bnx2 *bp)
6534 {
6535 u32 *hw_stats = (u32 *) bp->stats_blk;
6536 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6537 int i;
6538
6539 /* The first 10 counters are 64-bit, stored as 20 hi/lo u32 words */
6540 for (i = 0; i < 20; i += 2) {
6541 u32 hi;
6542 u64 lo;
6543
6544 hi = temp_stats[i] + hw_stats[i];
6545 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6546 if (lo > 0xffffffff)
6547 hi++;
6548 temp_stats[i] = hi;
6549 temp_stats[i + 1] = lo & 0xffffffff;
6550 }
6551
6552 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6553 temp_stats[i] += hw_stats[i];
6554 }
6555
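/* 64-bit hardware counters are kept as hi/lo u32 pairs.  On 64-bit
 * hosts both halves are combined; on 32-bit hosts unsigned long can
 * only hold the low half, so the high word is dropped.
 */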
6556 #define GET_64BIT_NET_STATS64(ctr) \
6557 (((unsigned long) (ctr##_hi) << 32) + \
6558 (unsigned long) (ctr##_lo))
6559
6560 #define GET_64BIT_NET_STATS32(ctr) \
6561 (ctr##_lo)
6562
6563 #if (BITS_PER_LONG == 64)
6564 #define GET_64BIT_NET_STATS(ctr) \
6565 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6566 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6567 #else
6568 #define GET_64BIT_NET_STATS(ctr) \
6569 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6570 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6571 #endif
6572
6573 #define GET_32BIT_NET_STATS(ctr) \
6574 (unsigned long) (bp->stats_blk->ctr + \
6575 bp->temp_stats_blk->ctr)
6576
6577 static struct net_device_stats *
6578 bnx2_get_stats(struct net_device *dev)
6579 {
6580 struct bnx2 *bp = netdev_priv(dev);
6581 struct net_device_stats *net_stats = &dev->stats;
6582
6583 if (bp->stats_blk == NULL) {
6584 return net_stats;
6585 }
6586 net_stats->rx_packets =
6587 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6588 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6589 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6590
6591 net_stats->tx_packets =
6592 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6593 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6594 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6595
6596 net_stats->rx_bytes =
6597 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6598
6599 net_stats->tx_bytes =
6600 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6601
6602 net_stats->multicast =
6603 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6604
6605 net_stats->collisions =
6606 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6607
6608 net_stats->rx_length_errors =
6609 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6610 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6611
6612 net_stats->rx_over_errors =
6613 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6614 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6615
6616 net_stats->rx_frame_errors =
6617 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6618
6619 net_stats->rx_crc_errors =
6620 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6621
6622 net_stats->rx_errors = net_stats->rx_length_errors +
6623 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6624 net_stats->rx_crc_errors;
6625
6626 net_stats->tx_aborted_errors =
6627 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6628 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6629
6630 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6631 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6632 net_stats->tx_carrier_errors = 0;
6633 else {
6634 net_stats->tx_carrier_errors =
6635 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6636 }
6637
6638 net_stats->tx_errors =
6639 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6640 net_stats->tx_aborted_errors +
6641 net_stats->tx_carrier_errors;
6642
6643 net_stats->rx_missed_errors =
6644 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6645 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6646 GET_32BIT_NET_STATS(stat_FwRxDrop);
6647
6648 return net_stats;
6649 }
6650
6651 /* All ethtool functions called with rtnl_lock */
6652
6653 static int
6654 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6655 {
6656 struct bnx2 *bp = netdev_priv(dev);
6657 int support_serdes = 0, support_copper = 0;
6658
6659 cmd->supported = SUPPORTED_Autoneg;
6660 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6661 support_serdes = 1;
6662 support_copper = 1;
6663 } else if (bp->phy_port == PORT_FIBRE)
6664 support_serdes = 1;
6665 else
6666 support_copper = 1;
6667
6668 if (support_serdes) {
6669 cmd->supported |= SUPPORTED_1000baseT_Full |
6670 SUPPORTED_FIBRE;
6671 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6672 cmd->supported |= SUPPORTED_2500baseX_Full;
6673
6674 }
6675 if (support_copper) {
6676 cmd->supported |= SUPPORTED_10baseT_Half |
6677 SUPPORTED_10baseT_Full |
6678 SUPPORTED_100baseT_Half |
6679 SUPPORTED_100baseT_Full |
6680 SUPPORTED_1000baseT_Full |
6681 SUPPORTED_TP;
6682
6683 }
6684
6685 spin_lock_bh(&bp->phy_lock);
6686 cmd->port = bp->phy_port;
6687 cmd->advertising = bp->advertising;
6688
6689 if (bp->autoneg & AUTONEG_SPEED) {
6690 cmd->autoneg = AUTONEG_ENABLE;
6691 }
6692 else {
6693 cmd->autoneg = AUTONEG_DISABLE;
6694 }
6695
6696 if (netif_carrier_ok(dev)) {
6697 cmd->speed = bp->line_speed;
6698 cmd->duplex = bp->duplex;
6699 }
6700 else {
6701 cmd->speed = -1;
6702 cmd->duplex = -1;
6703 }
6704 spin_unlock_bh(&bp->phy_lock);
6705
6706 cmd->transceiver = XCVR_INTERNAL;
6707 cmd->phy_address = bp->phy_addr;
6708
6709 return 0;
6710 }
6711
6712 static int
6713 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6714 {
6715 struct bnx2 *bp = netdev_priv(dev);
6716 u8 autoneg = bp->autoneg;
6717 u8 req_duplex = bp->req_duplex;
6718 u16 req_line_speed = bp->req_line_speed;
6719 u32 advertising = bp->advertising;
6720 int err = -EINVAL;
6721
6722 spin_lock_bh(&bp->phy_lock);
6723
6724 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6725 goto err_out_unlock;
6726
6727 if (cmd->port != bp->phy_port &&
6728 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6729 goto err_out_unlock;
6730
6731 /* If device is down, we can store the settings only if the user
6732 * is setting the currently active port.
6733 */
6734 if (!netif_running(dev) && cmd->port != bp->phy_port)
6735 goto err_out_unlock;
6736
6737 if (cmd->autoneg == AUTONEG_ENABLE) {
6738 autoneg |= AUTONEG_SPEED;
6739
6740 advertising = cmd->advertising;
6741 if (cmd->port == PORT_TP) {
6742 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6743 if (!advertising)
6744 advertising = ETHTOOL_ALL_COPPER_SPEED;
6745 } else {
6746 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6747 if (!advertising)
6748 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6749 }
6750 advertising |= ADVERTISED_Autoneg;
6751 }
6752 else {
6753 if (cmd->port == PORT_FIBRE) {
6754 if ((cmd->speed != SPEED_1000 &&
6755 cmd->speed != SPEED_2500) ||
6756 (cmd->duplex != DUPLEX_FULL))
6757 goto err_out_unlock;
6758
6759 if (cmd->speed == SPEED_2500 &&
6760 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6761 goto err_out_unlock;
6762 }
6763 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6764 goto err_out_unlock;
6765
6766 autoneg &= ~AUTONEG_SPEED;
6767 req_line_speed = cmd->speed;
6768 req_duplex = cmd->duplex;
6769 advertising = 0;
6770 }
6771
6772 bp->autoneg = autoneg;
6773 bp->advertising = advertising;
6774 bp->req_line_speed = req_line_speed;
6775 bp->req_duplex = req_duplex;
6776
6777 err = 0;
6778 /* If device is down, the new settings will be picked up when it is
6779 * brought up.
6780 */
6781 if (netif_running(dev))
6782 err = bnx2_setup_phy(bp, cmd->port);
6783
6784 err_out_unlock:
6785 spin_unlock_bh(&bp->phy_lock);
6786
6787 return err;
6788 }
6789
6790 static void
6791 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6792 {
6793 struct bnx2 *bp = netdev_priv(dev);
6794
6795 strcpy(info->driver, DRV_MODULE_NAME);
6796 strcpy(info->version, DRV_MODULE_VERSION);
6797 strcpy(info->bus_info, pci_name(bp->pdev));
6798 strcpy(info->fw_version, bp->fw_version);
6799 }
6800
6801 #define BNX2_REGDUMP_LEN (32 * 1024)
6802
6803 static int
6804 bnx2_get_regs_len(struct net_device *dev)
6805 {
6806 return BNX2_REGDUMP_LEN;
6807 }
6808
6809 static void
6810 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6811 {
6812 u32 *p = _p, i, offset;
6813 u8 *orig_p = _p;
6814 struct bnx2 *bp = netdev_priv(dev);
6815 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6816 0x0800, 0x0880, 0x0c00, 0x0c10,
6817 0x0c30, 0x0d08, 0x1000, 0x101c,
6818 0x1040, 0x1048, 0x1080, 0x10a4,
6819 0x1400, 0x1490, 0x1498, 0x14f0,
6820 0x1500, 0x155c, 0x1580, 0x15dc,
6821 0x1600, 0x1658, 0x1680, 0x16d8,
6822 0x1800, 0x1820, 0x1840, 0x1854,
6823 0x1880, 0x1894, 0x1900, 0x1984,
6824 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6825 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6826 0x2000, 0x2030, 0x23c0, 0x2400,
6827 0x2800, 0x2820, 0x2830, 0x2850,
6828 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6829 0x3c00, 0x3c94, 0x4000, 0x4010,
6830 0x4080, 0x4090, 0x43c0, 0x4458,
6831 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6832 0x4fc0, 0x5010, 0x53c0, 0x5444,
6833 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6834 0x5fc0, 0x6000, 0x6400, 0x6428,
6835 0x6800, 0x6848, 0x684c, 0x6860,
6836 0x6888, 0x6910, 0x8000 };
6837
6838 regs->version = 0;
6839
6840 memset(p, 0, BNX2_REGDUMP_LEN);
6841
6842 if (!netif_running(bp->dev))
6843 return;
6844
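/* reg_boundaries[] lists [start, end) pairs of readable register
 * ranges; dump each range and skip the holes, which stay zeroed.
 */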
6845 i = 0;
6846 offset = reg_boundaries[0];
6847 p += offset;
6848 while (offset < BNX2_REGDUMP_LEN) {
6849 *p++ = REG_RD(bp, offset);
6850 offset += 4;
6851 if (offset == reg_boundaries[i + 1]) {
6852 offset = reg_boundaries[i + 2];
6853 p = (u32 *) (orig_p + offset);
6854 i += 2;
6855 }
6856 }
6857 }
6858
6859 static void
6860 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6861 {
6862 struct bnx2 *bp = netdev_priv(dev);
6863
6864 if (bp->flags & BNX2_FLAG_NO_WOL) {
6865 wol->supported = 0;
6866 wol->wolopts = 0;
6867 }
6868 else {
6869 wol->supported = WAKE_MAGIC;
6870 if (bp->wol)
6871 wol->wolopts = WAKE_MAGIC;
6872 else
6873 wol->wolopts = 0;
6874 }
6875 memset(&wol->sopass, 0, sizeof(wol->sopass));
6876 }
6877
6878 static int
6879 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6880 {
6881 struct bnx2 *bp = netdev_priv(dev);
6882
6883 if (wol->wolopts & ~WAKE_MAGIC)
6884 return -EINVAL;
6885
6886 if (wol->wolopts & WAKE_MAGIC) {
6887 if (bp->flags & BNX2_FLAG_NO_WOL)
6888 return -EINVAL;
6889
6890 bp->wol = 1;
6891 }
6892 else {
6893 bp->wol = 0;
6894 }
6895 return 0;
6896 }
6897
6898 static int
6899 bnx2_nway_reset(struct net_device *dev)
6900 {
6901 struct bnx2 *bp = netdev_priv(dev);
6902 u32 bmcr;
6903
6904 if (!netif_running(dev))
6905 return -EAGAIN;
6906
6907 if (!(bp->autoneg & AUTONEG_SPEED)) {
6908 return -EINVAL;
6909 }
6910
6911 spin_lock_bh(&bp->phy_lock);
6912
6913 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6914 int rc;
6915
6916 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6917 spin_unlock_bh(&bp->phy_lock);
6918 return rc;
6919 }
6920
6921 /* Force a link-down event visible to the other side */
6922 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6923 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6924 spin_unlock_bh(&bp->phy_lock);
6925
6926 msleep(20);
6927
6928 spin_lock_bh(&bp->phy_lock);
6929
6930 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6931 bp->serdes_an_pending = 1;
6932 mod_timer(&bp->timer, jiffies + bp->current_interval);
6933 }
6934
6935 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6936 bmcr &= ~BMCR_LOOPBACK;
6937 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6938
6939 spin_unlock_bh(&bp->phy_lock);
6940
6941 return 0;
6942 }
6943
6944 static u32
6945 bnx2_get_link(struct net_device *dev)
6946 {
6947 struct bnx2 *bp = netdev_priv(dev);
6948
6949 return bp->link_up;
6950 }
6951
6952 static int
6953 bnx2_get_eeprom_len(struct net_device *dev)
6954 {
6955 struct bnx2 *bp = netdev_priv(dev);
6956
6957 if (bp->flash_info == NULL)
6958 return 0;
6959
6960 return (int) bp->flash_size;
6961 }
6962
6963 static int
6964 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6965 u8 *eebuf)
6966 {
6967 struct bnx2 *bp = netdev_priv(dev);
6968 int rc;
6969
6970 if (!netif_running(dev))
6971 return -EAGAIN;
6972
6973 /* parameters already validated in ethtool_get_eeprom */
6974
6975 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6976
6977 return rc;
6978 }
6979
6980 static int
6981 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6982 u8 *eebuf)
6983 {
6984 struct bnx2 *bp = netdev_priv(dev);
6985 int rc;
6986
6987 if (!netif_running(dev))
6988 return -EAGAIN;
6989
6990 /* parameters already validated in ethtool_set_eeprom */
6991
6992 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6993
6994 return rc;
6995 }
6996
6997 static int
6998 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6999 {
7000 struct bnx2 *bp = netdev_priv(dev);
7001
7002 memset(coal, 0, sizeof(struct ethtool_coalesce));
7003
7004 coal->rx_coalesce_usecs = bp->rx_ticks;
7005 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7006 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7007 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7008
7009 coal->tx_coalesce_usecs = bp->tx_ticks;
7010 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7011 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7012 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7013
7014 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7015
7016 return 0;
7017 }
7018
7019 static int
7020 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7021 {
7022 struct bnx2 *bp = netdev_priv(dev);
7023
7024 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7025 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7026
7027 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7028 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7029
7030 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7031 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7032
7033 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7034 if (bp->rx_quick_cons_trip_int > 0xff)
7035 bp->rx_quick_cons_trip_int = 0xff;
7036
7037 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7038 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7039
7040 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7041 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7042
7043 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7044 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7045
7046 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7047 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7048 0xff;
7049
7050 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7051 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7052 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7053 bp->stats_ticks = USEC_PER_SEC;
7054 }
7055 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7056 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7057 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7058
7059 if (netif_running(bp->dev)) {
7060 bnx2_netif_stop(bp, true);
7061 bnx2_init_nic(bp, 0);
7062 bnx2_netif_start(bp, true);
7063 }
7064
7065 return 0;
7066 }
7067
7068 static void
7069 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7070 {
7071 struct bnx2 *bp = netdev_priv(dev);
7072
7073 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7074 ering->rx_mini_max_pending = 0;
7075 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7076
7077 ering->rx_pending = bp->rx_ring_size;
7078 ering->rx_mini_pending = 0;
7079 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7080
7081 ering->tx_max_pending = MAX_TX_DESC_CNT;
7082 ering->tx_pending = bp->tx_ring_size;
7083 }
7084
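/* Resize the rx/tx rings.  If the device is running this requires a
 * full chip reset, so the chip statistics are saved first and the NIC
 * is re-initialized with the new sizes afterwards.
 */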
7085 static int
7086 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7087 {
7088 if (netif_running(bp->dev)) {
7089 /* Reset will erase chipset stats; save them */
7090 bnx2_save_stats(bp);
7091
7092 bnx2_netif_stop(bp, true);
7093 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7094 bnx2_free_skbs(bp);
7095 bnx2_free_mem(bp);
7096 }
7097
7098 bnx2_set_rx_ring_size(bp, rx);
7099 bp->tx_ring_size = tx;
7100
7101 if (netif_running(bp->dev)) {
7102 int rc;
7103
7104 rc = bnx2_alloc_mem(bp);
7105 if (!rc)
7106 rc = bnx2_init_nic(bp, 0);
7107
7108 if (rc) {
7109 bnx2_napi_enable(bp);
7110 dev_close(bp->dev);
7111 return rc;
7112 }
7113 #ifdef BCM_CNIC
7114 mutex_lock(&bp->cnic_lock);
7115 /* Let cnic know about the new status block. */
7116 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7117 bnx2_setup_cnic_irq_info(bp);
7118 mutex_unlock(&bp->cnic_lock);
7119 #endif
7120 bnx2_netif_start(bp, true);
7121 }
7122 return 0;
7123 }
7124
7125 static int
7126 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7127 {
7128 struct bnx2 *bp = netdev_priv(dev);
7129 int rc;
7130
7131 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7132 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7133 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7134
7135 return -EINVAL;
7136 }
7137 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7138 return rc;
7139 }
7140
7141 static void
7142 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7143 {
7144 struct bnx2 *bp = netdev_priv(dev);
7145
7146 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7147 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7148 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7149 }
7150
7151 static int
7152 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7153 {
7154 struct bnx2 *bp = netdev_priv(dev);
7155
7156 bp->req_flow_ctrl = 0;
7157 if (epause->rx_pause)
7158 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7159 if (epause->tx_pause)
7160 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7161
7162 if (epause->autoneg) {
7163 bp->autoneg |= AUTONEG_FLOW_CTRL;
7164 }
7165 else {
7166 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7167 }
7168
7169 if (netif_running(dev)) {
7170 spin_lock_bh(&bp->phy_lock);
7171 bnx2_setup_phy(bp, bp->phy_port);
7172 spin_unlock_bh(&bp->phy_lock);
7173 }
7174
7175 return 0;
7176 }
7177
7178 static u32
7179 bnx2_get_rx_csum(struct net_device *dev)
7180 {
7181 struct bnx2 *bp = netdev_priv(dev);
7182
7183 return bp->rx_csum;
7184 }
7185
7186 static int
7187 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7188 {
7189 struct bnx2 *bp = netdev_priv(dev);
7190
7191 bp->rx_csum = data;
7192 return 0;
7193 }
7194
7195 static int
7196 bnx2_set_tso(struct net_device *dev, u32 data)
7197 {
7198 struct bnx2 *bp = netdev_priv(dev);
7199
7200 if (data) {
7201 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7202 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7203 dev->features |= NETIF_F_TSO6;
7204 } else
7205 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7206 NETIF_F_TSO_ECN);
7207 return 0;
7208 }
7209
7210 static struct {
7211 char string[ETH_GSTRING_LEN];
7212 } bnx2_stats_str_arr[] = {
7213 { "rx_bytes" },
7214 { "rx_error_bytes" },
7215 { "tx_bytes" },
7216 { "tx_error_bytes" },
7217 { "rx_ucast_packets" },
7218 { "rx_mcast_packets" },
7219 { "rx_bcast_packets" },
7220 { "tx_ucast_packets" },
7221 { "tx_mcast_packets" },
7222 { "tx_bcast_packets" },
7223 { "tx_mac_errors" },
7224 { "tx_carrier_errors" },
7225 { "rx_crc_errors" },
7226 { "rx_align_errors" },
7227 { "tx_single_collisions" },
7228 { "tx_multi_collisions" },
7229 { "tx_deferred" },
7230 { "tx_excess_collisions" },
7231 { "tx_late_collisions" },
7232 { "tx_total_collisions" },
7233 { "rx_fragments" },
7234 { "rx_jabbers" },
7235 { "rx_undersize_packets" },
7236 { "rx_oversize_packets" },
7237 { "rx_64_byte_packets" },
7238 { "rx_65_to_127_byte_packets" },
7239 { "rx_128_to_255_byte_packets" },
7240 { "rx_256_to_511_byte_packets" },
7241 { "rx_512_to_1023_byte_packets" },
7242 { "rx_1024_to_1522_byte_packets" },
7243 { "rx_1523_to_9022_byte_packets" },
7244 { "tx_64_byte_packets" },
7245 { "tx_65_to_127_byte_packets" },
7246 { "tx_128_to_255_byte_packets" },
7247 { "tx_256_to_511_byte_packets" },
7248 { "tx_512_to_1023_byte_packets" },
7249 { "tx_1024_to_1522_byte_packets" },
7250 { "tx_1523_to_9022_byte_packets" },
7251 { "rx_xon_frames" },
7252 { "rx_xoff_frames" },
7253 { "tx_xon_frames" },
7254 { "tx_xoff_frames" },
7255 { "rx_mac_ctrl_frames" },
7256 { "rx_filtered_packets" },
7257 { "rx_ftq_discards" },
7258 { "rx_discards" },
7259 { "rx_fw_discards" },
7260 };
7261
7262 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7263 sizeof(bnx2_stats_str_arr[0]))
7264
7265 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7266
7267 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7268 STATS_OFFSET32(stat_IfHCInOctets_hi),
7269 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7270 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7271 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7272 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7273 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7274 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7275 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7276 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7277 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7278 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7279 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7280 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7281 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7282 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7283 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7284 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7285 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7286 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7287 STATS_OFFSET32(stat_EtherStatsCollisions),
7288 STATS_OFFSET32(stat_EtherStatsFragments),
7289 STATS_OFFSET32(stat_EtherStatsJabbers),
7290 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7291 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7292 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7293 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7294 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7295 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7296 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7297 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7298 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7299 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7300 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7301 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7302 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7303 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7304 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7305 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7306 STATS_OFFSET32(stat_XonPauseFramesReceived),
7307 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7308 STATS_OFFSET32(stat_OutXonSent),
7309 STATS_OFFSET32(stat_OutXoffSent),
7310 STATS_OFFSET32(stat_MacControlFramesReceived),
7311 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7312 STATS_OFFSET32(stat_IfInFTQDiscards),
7313 STATS_OFFSET32(stat_IfInMBUFDiscards),
7314 STATS_OFFSET32(stat_FwRxDrop),
7315 };
7316
7317 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7318 * skipped because of errata.
7319 */
7320 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7321 8,0,8,8,8,8,8,8,8,8,
7322 4,0,4,4,4,4,4,4,4,4,
7323 4,4,4,4,4,4,4,4,4,4,
7324 4,4,4,4,4,4,4,4,4,4,
7325 4,4,4,4,4,4,4,
7326 };
7327
7328 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7329 8,0,8,8,8,8,8,8,8,8,
7330 4,4,4,4,4,4,4,4,4,4,
7331 4,4,4,4,4,4,4,4,4,4,
7332 4,4,4,4,4,4,4,4,4,4,
7333 4,4,4,4,4,4,4,
7334 };
7335
7336 #define BNX2_NUM_TESTS 6
7337
7338 static struct {
7339 char string[ETH_GSTRING_LEN];
7340 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7341 { "register_test (offline)" },
7342 { "memory_test (offline)" },
7343 { "loopback_test (offline)" },
7344 { "nvram_test (online)" },
7345 { "interrupt_test (online)" },
7346 { "link_test (online)" },
7347 };
7348
7349 static int
7350 bnx2_get_sset_count(struct net_device *dev, int sset)
7351 {
7352 switch (sset) {
7353 case ETH_SS_TEST:
7354 return BNX2_NUM_TESTS;
7355 case ETH_SS_STATS:
7356 return BNX2_NUM_STATS;
7357 default:
7358 return -EOPNOTSUPP;
7359 }
7360 }
7361
7362 static void
7363 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7364 {
7365 struct bnx2 *bp = netdev_priv(dev);
7366
7367 bnx2_set_power_state(bp, PCI_D0);
7368
7369 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7370 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7371 int i;
7372
7373 bnx2_netif_stop(bp, true);
7374 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7375 bnx2_free_skbs(bp);
7376
7377 if (bnx2_test_registers(bp) != 0) {
7378 buf[0] = 1;
7379 etest->flags |= ETH_TEST_FL_FAILED;
7380 }
7381 if (bnx2_test_memory(bp) != 0) {
7382 buf[1] = 1;
7383 etest->flags |= ETH_TEST_FL_FAILED;
7384 }
7385 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7386 etest->flags |= ETH_TEST_FL_FAILED;
7387
7388 if (!netif_running(bp->dev))
7389 bnx2_shutdown_chip(bp);
7390 else {
7391 bnx2_init_nic(bp, 1);
7392 bnx2_netif_start(bp, true);
7393 }
7394
7395 /* Wait up to ~7 seconds for the link to come up */
7396 for (i = 0; i < 7; i++) {
7397 if (bp->link_up)
7398 break;
7399 msleep_interruptible(1000);
7400 }
7401 }
7402
7403 if (bnx2_test_nvram(bp) != 0) {
7404 buf[3] = 1;
7405 etest->flags |= ETH_TEST_FL_FAILED;
7406 }
7407 if (bnx2_test_intr(bp) != 0) {
7408 buf[4] = 1;
7409 etest->flags |= ETH_TEST_FL_FAILED;
7410 }
7411
7412 if (bnx2_test_link(bp) != 0) {
7413 buf[5] = 1;
7414 etest->flags |= ETH_TEST_FL_FAILED;
7415
7416 }
7417 if (!netif_running(bp->dev))
7418 bnx2_set_power_state(bp, PCI_D3hot);
7419 }
7420
7421 static void
7422 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7423 {
7424 switch (stringset) {
7425 case ETH_SS_STATS:
7426 memcpy(buf, bnx2_stats_str_arr,
7427 sizeof(bnx2_stats_str_arr));
7428 break;
7429 case ETH_SS_TEST:
7430 memcpy(buf, bnx2_tests_str_arr,
7431 sizeof(bnx2_tests_str_arr));
7432 break;
7433 }
7434 }
7435
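/* Copy the hardware statistics into the ethtool buffer.  The per-chip
 * length arrays above give each counter's width in bytes (0 means the
 * counter is skipped on that chip because of errata).
 */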
7436 static void
7437 bnx2_get_ethtool_stats(struct net_device *dev,
7438 struct ethtool_stats *stats, u64 *buf)
7439 {
7440 struct bnx2 *bp = netdev_priv(dev);
7441 int i;
7442 u32 *hw_stats = (u32 *) bp->stats_blk;
7443 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7444 u8 *stats_len_arr = NULL;
7445
7446 if (hw_stats == NULL) {
7447 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7448 return;
7449 }
7450
7451 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7452 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7453 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7454 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7455 stats_len_arr = bnx2_5706_stats_len_arr;
7456 else
7457 stats_len_arr = bnx2_5708_stats_len_arr;
7458
7459 for (i = 0; i < BNX2_NUM_STATS; i++) {
7460 unsigned long offset;
7461
7462 if (stats_len_arr[i] == 0) {
7463 /* skip this counter */
7464 buf[i] = 0;
7465 continue;
7466 }
7467
7468 offset = bnx2_stats_offset_arr[i];
7469 if (stats_len_arr[i] == 4) {
7470 /* 4-byte counter */
7471 buf[i] = (u64) *(hw_stats + offset) +
7472 *(temp_stats + offset);
7473 continue;
7474 }
7475 /* 8-byte counter */
7476 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7477 *(hw_stats + offset + 1) +
7478 (((u64) *(temp_stats + offset)) << 32) +
7479 *(temp_stats + offset + 1);
7480 }
7481 }
7482
7483 static int
7484 bnx2_phys_id(struct net_device *dev, u32 data)
7485 {
7486 struct bnx2 *bp = netdev_priv(dev);
7487 int i;
7488 u32 save;
7489
7490 bnx2_set_power_state(bp, PCI_D0);
7491
7492 if (data == 0)
7493 data = 2;
7494
7495 save = REG_RD(bp, BNX2_MISC_CFG);
7496 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7497
7498 for (i = 0; i < (data * 2); i++) {
7499 if ((i % 2) == 0) {
7500 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7501 }
7502 else {
7503 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7504 BNX2_EMAC_LED_1000MB_OVERRIDE |
7505 BNX2_EMAC_LED_100MB_OVERRIDE |
7506 BNX2_EMAC_LED_10MB_OVERRIDE |
7507 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7508 BNX2_EMAC_LED_TRAFFIC);
7509 }
7510 msleep_interruptible(500);
7511 if (signal_pending(current))
7512 break;
7513 }
7514 REG_WR(bp, BNX2_EMAC_LED, 0);
7515 REG_WR(bp, BNX2_MISC_CFG, save);
7516
7517 if (!netif_running(dev))
7518 bnx2_set_power_state(bp, PCI_D3hot);
7519
7520 return 0;
7521 }
7522
7523 static int
7524 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7525 {
7526 struct bnx2 *bp = netdev_priv(dev);
7527
7528 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7529 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7530 else
7531 return (ethtool_op_set_tx_csum(dev, data));
7532 }
7533
7534 static const struct ethtool_ops bnx2_ethtool_ops = {
7535 .get_settings = bnx2_get_settings,
7536 .set_settings = bnx2_set_settings,
7537 .get_drvinfo = bnx2_get_drvinfo,
7538 .get_regs_len = bnx2_get_regs_len,
7539 .get_regs = bnx2_get_regs,
7540 .get_wol = bnx2_get_wol,
7541 .set_wol = bnx2_set_wol,
7542 .nway_reset = bnx2_nway_reset,
7543 .get_link = bnx2_get_link,
7544 .get_eeprom_len = bnx2_get_eeprom_len,
7545 .get_eeprom = bnx2_get_eeprom,
7546 .set_eeprom = bnx2_set_eeprom,
7547 .get_coalesce = bnx2_get_coalesce,
7548 .set_coalesce = bnx2_set_coalesce,
7549 .get_ringparam = bnx2_get_ringparam,
7550 .set_ringparam = bnx2_set_ringparam,
7551 .get_pauseparam = bnx2_get_pauseparam,
7552 .set_pauseparam = bnx2_set_pauseparam,
7553 .get_rx_csum = bnx2_get_rx_csum,
7554 .set_rx_csum = bnx2_set_rx_csum,
7555 .set_tx_csum = bnx2_set_tx_csum,
7556 .set_sg = ethtool_op_set_sg,
7557 .set_tso = bnx2_set_tso,
7558 .self_test = bnx2_self_test,
7559 .get_strings = bnx2_get_strings,
7560 .phys_id = bnx2_phys_id,
7561 .get_ethtool_stats = bnx2_get_ethtool_stats,
7562 .get_sset_count = bnx2_get_sset_count,
7563 };
7564
7565 /* Called with rtnl_lock */
7566 static int
7567 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7568 {
7569 struct mii_ioctl_data *data = if_mii(ifr);
7570 struct bnx2 *bp = netdev_priv(dev);
7571 int err;
7572
7573 switch(cmd) {
7574 case SIOCGMIIPHY:
7575 data->phy_id = bp->phy_addr;
7576
7577 /* fallthru */
7578 case SIOCGMIIREG: {
7579 u32 mii_regval;
7580
7581 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7582 return -EOPNOTSUPP;
7583
7584 if (!netif_running(dev))
7585 return -EAGAIN;
7586
7587 spin_lock_bh(&bp->phy_lock);
7588 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7589 spin_unlock_bh(&bp->phy_lock);
7590
7591 data->val_out = mii_regval;
7592
7593 return err;
7594 }
7595
7596 case SIOCSMIIREG:
7597 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7598 return -EOPNOTSUPP;
7599
7600 if (!netif_running(dev))
7601 return -EAGAIN;
7602
7603 spin_lock_bh(&bp->phy_lock);
7604 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7605 spin_unlock_bh(&bp->phy_lock);
7606
7607 return err;
7608
7609 default:
7610 /* do nothing */
7611 break;
7612 }
7613 return -EOPNOTSUPP;
7614 }
7615
7616 /* Called with rtnl_lock */
7617 static int
7618 bnx2_change_mac_addr(struct net_device *dev, void *p)
7619 {
7620 struct sockaddr *addr = p;
7621 struct bnx2 *bp = netdev_priv(dev);
7622
7623 if (!is_valid_ether_addr(addr->sa_data))
7624 return -EINVAL;
7625
7626 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7627 if (netif_running(dev))
7628 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7629
7630 return 0;
7631 }
7632
7633 /* Called with rtnl_lock */
7634 static int
7635 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7636 {
7637 struct bnx2 *bp = netdev_priv(dev);
7638
7639 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7640 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7641 return -EINVAL;
7642
7643 dev->mtu = new_mtu;
7644 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7645 }
7646
7647 #ifdef CONFIG_NET_POLL_CONTROLLER
7648 static void
7649 poll_bnx2(struct net_device *dev)
7650 {
7651 struct bnx2 *bp = netdev_priv(dev);
7652 int i;
7653
7654 for (i = 0; i < bp->irq_nvecs; i++) {
7655 struct bnx2_irq *irq = &bp->irq_tbl[i];
7656
7657 disable_irq(irq->vector);
7658 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7659 enable_irq(irq->vector);
7660 }
7661 }
7662 #endif
7663
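/* Determine whether a 5709 is copper or SerDes from its bond id; for
 * dual-media parts, decode the strap (or its override) per PCI
 * function.
 */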
7664 static void __devinit
7665 bnx2_get_5709_media(struct bnx2 *bp)
7666 {
7667 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7668 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7669 u32 strap;
7670
7671 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7672 return;
7673 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7674 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7675 return;
7676 }
7677
7678 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7679 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7680 else
7681 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7682
7683 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7684 switch (strap) {
7685 case 0x4:
7686 case 0x5:
7687 case 0x6:
7688 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7689 return;
7690 }
7691 } else {
7692 switch (strap) {
7693 case 0x1:
7694 case 0x2:
7695 case 0x4:
7696 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7697 return;
7698 }
7699 }
7700 }
7701
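/* Detect the PCI/PCI-X bus mode, width, and clock speed from the
 * PCICFG_MISC_STATUS and clock control registers.
 */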
7702 static void __devinit
7703 bnx2_get_pci_speed(struct bnx2 *bp)
7704 {
7705 u32 reg;
7706
7707 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7708 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7709 u32 clkreg;
7710
7711 bp->flags |= BNX2_FLAG_PCIX;
7712
7713 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7714
7715 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7716 switch (clkreg) {
7717 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7718 bp->bus_speed_mhz = 133;
7719 break;
7720
7721 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7722 bp->bus_speed_mhz = 100;
7723 break;
7724
7725 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7727 bp->bus_speed_mhz = 66;
7728 break;
7729
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7731 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7732 bp->bus_speed_mhz = 50;
7733 break;
7734
7735 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7736 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7737 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7738 bp->bus_speed_mhz = 33;
7739 break;
7740 }
7741 }
7742 else {
7743 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7744 bp->bus_speed_mhz = 66;
7745 else
7746 bp->bus_speed_mhz = 33;
7747 }
7748
7749 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7750 bp->flags |= BNX2_FLAG_PCI_32BIT;
7751
7752 }
7753
7754 static void __devinit
7755 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7756 {
7757 int rc, i, j;
7758 u8 *data;
7759 unsigned int block_end, rosize, len;
7760
7761 #define BNX2_VPD_NVRAM_OFFSET 0x300
7762 #define BNX2_VPD_LEN 128
7763 #define BNX2_MAX_VER_SLEN 30
7764
7765 data = kmalloc(256, GFP_KERNEL);
7766 if (!data)
7767 return;
7768
7769 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7770 BNX2_VPD_LEN);
7771 if (rc)
7772 goto vpd_done;
7773
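/* bnx2_nvram_read() placed the VPD image in the upper half of the
 * buffer; byte-swap each 32-bit word into the lower half before
 * parsing.
 */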
7774 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7775 data[i] = data[i + BNX2_VPD_LEN + 3];
7776 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7777 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7778 data[i + 3] = data[i + BNX2_VPD_LEN];
7779 }
7780
7781 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7782 if (i < 0)
7783 goto vpd_done;
7784
7785 rosize = pci_vpd_lrdt_size(&data[i]);
7786 i += PCI_VPD_LRDT_TAG_SIZE;
7787 block_end = i + rosize;
7788
7789 if (block_end > BNX2_VPD_LEN)
7790 goto vpd_done;
7791
7792 j = pci_vpd_find_info_keyword(data, i, rosize,
7793 PCI_VPD_RO_KEYWORD_MFR_ID);
7794 if (j < 0)
7795 goto vpd_done;
7796
7797 len = pci_vpd_info_field_size(&data[j]);
7798
7799 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7800 if (j + len > block_end || len != 4 ||
7801 memcmp(&data[j], "1028", 4))
7802 goto vpd_done;
7803
7804 j = pci_vpd_find_info_keyword(data, i, rosize,
7805 PCI_VPD_RO_KEYWORD_VENDOR0);
7806 if (j < 0)
7807 goto vpd_done;
7808
7809 len = pci_vpd_info_field_size(&data[j]);
7810
7811 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7812 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7813 goto vpd_done;
7814
7815 memcpy(bp->fw_version, &data[j], len);
7816 bp->fw_version[len] = ' ';
7817
7818 vpd_done:
7819 kfree(data);
7820 }
7821
7822 static int __devinit
7823 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7824 {
7825 struct bnx2 *bp;
7826 unsigned long mem_len;
7827 int rc, i, j;
7828 u32 reg;
7829 u64 dma_mask, persist_dma_mask;
7830
7831 SET_NETDEV_DEV(dev, &pdev->dev);
7832 bp = netdev_priv(dev);
7833
7834 bp->flags = 0;
7835 bp->phy_flags = 0;
7836
7837 bp->temp_stats_blk =
7838 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7839
7840 if (bp->temp_stats_blk == NULL) {
7841 rc = -ENOMEM;
7842 goto err_out;
7843 }
7844
7845 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7846 rc = pci_enable_device(pdev);
7847 if (rc) {
7848 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7849 goto err_out;
7850 }
7851
7852 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7853 dev_err(&pdev->dev,
7854 "Cannot find PCI device base address, aborting\n");
7855 rc = -ENODEV;
7856 goto err_out_disable;
7857 }
7858
7859 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7860 if (rc) {
7861 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7862 goto err_out_disable;
7863 }
7864
7865 pci_set_master(pdev);
7866 pci_save_state(pdev);
7867
7868 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7869 if (bp->pm_cap == 0) {
7870 dev_err(&pdev->dev,
7871 "Cannot find power management capability, aborting\n");
7872 rc = -EIO;
7873 goto err_out_release;
7874 }
7875
7876 bp->dev = dev;
7877 bp->pdev = pdev;
7878
7879 spin_lock_init(&bp->phy_lock);
7880 spin_lock_init(&bp->indirect_lock);
7881 #ifdef BCM_CNIC
7882 mutex_init(&bp->cnic_lock);
7883 #endif
7884 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7885
7886 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7887 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7888 dev->mem_end = dev->mem_start + mem_len;
7889 dev->irq = pdev->irq;
7890
7891 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7892
7893 if (!bp->regview) {
7894 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7895 rc = -ENOMEM;
7896 goto err_out_release;
7897 }
7898
7899 /* Configure byte swap and enable write to the reg_window registers.
7900 * Rely on the CPU to do target byte swapping on big endian systems;
7901 * the chip's target access swapping will not swap all accesses.
7902 */
7903 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7904 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7905 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7906
7907 bnx2_set_power_state(bp, PCI_D0);
7908
7909 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7910
7911 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7912 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7913 dev_err(&pdev->dev,
7914 "Cannot find PCIE capability, aborting\n");
7915 rc = -EIO;
7916 goto err_out_unmap;
7917 }
7918 bp->flags |= BNX2_FLAG_PCIE;
7919 if (CHIP_REV(bp) == CHIP_REV_Ax)
7920 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7921 } else {
7922 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7923 if (bp->pcix_cap == 0) {
7924 dev_err(&pdev->dev,
7925 "Cannot find PCIX capability, aborting\n");
7926 rc = -EIO;
7927 goto err_out_unmap;
7928 }
7929 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7930 }
7931
7932 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7933 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7934 bp->flags |= BNX2_FLAG_MSIX_CAP;
7935 }
7936
7937 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7938 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7939 bp->flags |= BNX2_FLAG_MSI_CAP;
7940 }
7941
7942 /* The 5708 cannot support DMA addresses wider than 40 bits. */
7943 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7944 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7945 else
7946 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7947
7948 /* Configure DMA attributes. */
7949 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7950 dev->features |= NETIF_F_HIGHDMA;
7951 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7952 if (rc) {
7953 dev_err(&pdev->dev,
7954 "pci_set_consistent_dma_mask failed, aborting\n");
7955 goto err_out_unmap;
7956 }
7957 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7958 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7959 goto err_out_unmap;
7960 }
7961
7962 if (!(bp->flags & BNX2_FLAG_PCIE))
7963 bnx2_get_pci_speed(bp);
7964
7965 /* 5706A0 may falsely detect SERR and PERR. */
7966 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7967 reg = REG_RD(bp, PCI_COMMAND);
7968 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7969 REG_WR(bp, PCI_COMMAND, reg);
7970 }
7971 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7972 !(bp->flags & BNX2_FLAG_PCIX)) {
7973
7974 dev_err(&pdev->dev,
7975 "5706 A1 can only be used in a PCIX bus, aborting\n");
7976 goto err_out_unmap;
7977 }
7978
7979 bnx2_init_nvram(bp);
7980
7981 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7982
7983 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7984 BNX2_SHM_HDR_SIGNATURE_SIG) {
7985 u32 off = PCI_FUNC(pdev->devfn) << 2;
7986
7987 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7988 } else
7989 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7990
7991 /* Get the permanent MAC address. First we need to make sure the
7992 * firmware is actually running.
7993 */
7994 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7995
7996 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7997 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7998 dev_err(&pdev->dev, "Firmware not running, aborting\n");
7999 rc = -ENODEV;
8000 goto err_out_unmap;
8001 }
8002
8003 bnx2_read_vpd_fw_ver(bp);
8004
8005 j = strlen(bp->fw_version);
8006 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8007 for (i = 0; i < 3 && j < 24; i++) {
8008 u8 num, k, skip0;
8009
8010 if (i == 0) {
8011 bp->fw_version[j++] = 'b';
8012 bp->fw_version[j++] = 'c';
8013 bp->fw_version[j++] = ' ';
8014 }
8015 num = (u8) (reg >> (24 - (i * 8)));
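/* Append num (0-255) in decimal, suppressing leading zeros. */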
8016 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8017 if (num >= k || !skip0 || k == 1) {
8018 bp->fw_version[j++] = (num / k) + '0';
8019 skip0 = 0;
8020 }
8021 }
8022 if (i != 2)
8023 bp->fw_version[j++] = '.';
8024 }
8025 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8026 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8027 bp->wol = 1;
8028
8029 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8030 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8031
8032 for (i = 0; i < 30; i++) {
8033 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8034 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8035 break;
8036 msleep(10);
8037 }
8038 }
8039 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8040 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8041 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8042 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8043 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8044
8045 if (j < 32)
8046 bp->fw_version[j++] = ' ';
8047 for (i = 0; i < 3 && j < 28; i++) {
8048 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8049 reg = swab32(reg);
8050 memcpy(&bp->fw_version[j], &reg, 4);
8051 j += 4;
8052 }
8053 }
8054
8055 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8056 bp->mac_addr[0] = (u8) (reg >> 8);
8057 bp->mac_addr[1] = (u8) reg;
8058
8059 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8060 bp->mac_addr[2] = (u8) (reg >> 24);
8061 bp->mac_addr[3] = (u8) (reg >> 16);
8062 bp->mac_addr[4] = (u8) (reg >> 8);
8063 bp->mac_addr[5] = (u8) reg;
8064
8065 bp->tx_ring_size = MAX_TX_DESC_CNT;
8066 bnx2_set_rx_ring_size(bp, 255);
8067
8068 bp->rx_csum = 1;
8069
8070 bp->tx_quick_cons_trip_int = 2;
8071 bp->tx_quick_cons_trip = 20;
8072 bp->tx_ticks_int = 18;
8073 bp->tx_ticks = 80;
8074
8075 bp->rx_quick_cons_trip_int = 2;
8076 bp->rx_quick_cons_trip = 12;
8077 bp->rx_ticks_int = 18;
8078 bp->rx_ticks = 18;
8079
8080 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8081
8082 bp->current_interval = BNX2_TIMER_INTERVAL;
8083
8084 bp->phy_addr = 1;
8085
8086 /* Determine the media type; WOL is disabled below on SERDES boards that cannot keep the link up on VAUX. */
8087 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8088 bnx2_get_5709_media(bp);
8089 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8090 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8091
8092 bp->phy_port = PORT_TP;
8093 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8094 bp->phy_port = PORT_FIBRE;
8095 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8096 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8097 bp->flags |= BNX2_FLAG_NO_WOL;
8098 bp->wol = 0;
8099 }
8100 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8101 /* Don't do parallel detect on this board because of
8102 * some board problems. The link will not go down
8103 * if we do parallel detect.
8104 */
8105 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8106 pdev->subsystem_device == 0x310c)
8107 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8108 } else {
8109 bp->phy_addr = 2;
8110 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8111 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8112 }
8113 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8114 CHIP_NUM(bp) == CHIP_NUM_5708)
8115 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8116 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8117 (CHIP_REV(bp) == CHIP_REV_Ax ||
8118 CHIP_REV(bp) == CHIP_REV_Bx))
8119 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8120
8121 bnx2_init_fw_cap(bp);
8122
8123 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8124 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8125 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8126 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8127 bp->flags |= BNX2_FLAG_NO_WOL;
8128 bp->wol = 0;
8129 }
8130
8131 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8132 bp->tx_quick_cons_trip_int =
8133 bp->tx_quick_cons_trip;
8134 bp->tx_ticks_int = bp->tx_ticks;
8135 bp->rx_quick_cons_trip_int =
8136 bp->rx_quick_cons_trip;
8137 bp->rx_ticks_int = bp->rx_ticks;
8138 bp->comp_prod_trip_int = bp->comp_prod_trip;
8139 bp->com_ticks_int = bp->com_ticks;
8140 bp->cmd_ticks_int = bp->cmd_ticks;
8141 }
8142
8143 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8144 *
8145 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8146 * with byte enables disabled on the unused 32-bit word. This is legal
8147 * but causes problems on the AMD 8132, which eventually stops
8148 * responding.
8149 *
8150 * AMD believes this incompatibility is unique to the 5706, and
8151 * prefers to locally disable MSI rather than globally disabling it.
8152 */
8153 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8154 struct pci_dev *amd_8132 = NULL;
8155
8156 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8157 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8158 amd_8132))) {
8159
8160 if (amd_8132->revision >= 0x10 &&
8161 amd_8132->revision <= 0x13) {
8162 disable_msi = 1;
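/* pci_get_device() took a reference on the matched device;
 * drop it before leaving the search loop.
 */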
8163 pci_dev_put(amd_8132);
8164 break;
8165 }
8166 }
8167 }
8168
8169 bnx2_set_default_link(bp);
8170 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8171
8172 init_timer(&bp->timer);
8173 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8174 bp->timer.data = (unsigned long) bp;
8175 bp->timer.function = bnx2_timer;
8176
8177 return 0;
8178
8179 err_out_unmap:
8180 if (bp->regview) {
8181 iounmap(bp->regview);
8182 bp->regview = NULL;
8183 }
8184
8185 err_out_release:
8186 pci_release_regions(pdev);
8187
8188 err_out_disable:
8189 pci_disable_device(pdev);
8190 pci_set_drvdata(pdev, NULL);
8191
8192 err_out:
8193 return rc;
8194 }
8195
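/* Format a human-readable bus description, e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz", into the caller-supplied buffer.
 */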
8196 static char * __devinit
8197 bnx2_bus_string(struct bnx2 *bp, char *str)
8198 {
8199 char *s = str;
8200
8201 if (bp->flags & BNX2_FLAG_PCIE) {
8202 s += sprintf(s, "PCI Express");
8203 } else {
8204 s += sprintf(s, "PCI");
8205 if (bp->flags & BNX2_FLAG_PCIX)
8206 s += sprintf(s, "-X");
8207 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8208 s += sprintf(s, " 32-bit");
8209 else
8210 s += sprintf(s, " 64-bit");
8211 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8212 }
8213 return str;
8214 }
8215
8216 static void __devinit
8217 bnx2_init_napi(struct bnx2 *bp)
8218 {
8219 int i;
8220
8221 for (i = 0; i < bp->irq_nvecs; i++) {
8222 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8223 int (*poll)(struct napi_struct *, int);
8224
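/* Vector 0 uses the full poll routine, which also handles link
 * events; the remaining MSI-X vectors poll RX/TX work only.
 */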
8225 if (i == 0)
8226 poll = bnx2_poll;
8227 else
8228 poll = bnx2_poll_msix;
8229
8230 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8231 bnapi->bp = bp;
8232 }
8233 }
8234
8235 static const struct net_device_ops bnx2_netdev_ops = {
8236 .ndo_open = bnx2_open,
8237 .ndo_start_xmit = bnx2_start_xmit,
8238 .ndo_stop = bnx2_close,
8239 .ndo_get_stats = bnx2_get_stats,
8240 .ndo_set_rx_mode = bnx2_set_rx_mode,
8241 .ndo_do_ioctl = bnx2_ioctl,
8242 .ndo_validate_addr = eth_validate_addr,
8243 .ndo_set_mac_address = bnx2_change_mac_addr,
8244 .ndo_change_mtu = bnx2_change_mtu,
8245 .ndo_tx_timeout = bnx2_tx_timeout,
8246 #ifdef BCM_VLAN
8247 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8248 #endif
8249 #ifdef CONFIG_NET_POLL_CONTROLLER
8250 .ndo_poll_controller = poll_bnx2,
8251 #endif
8252 };
8253
8254 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8255 {
8256 #ifdef BCM_VLAN
8257 dev->vlan_features |= flags;
8258 #endif
8259 }
8260
8261 static int __devinit
8262 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8263 {
8264 static int version_printed;
8265 struct net_device *dev = NULL;
8266 struct bnx2 *bp;
8267 int rc;
8268 char str[40];
8269
8270 if (version_printed++ == 0)
8271 pr_info("%s", version);
8272
8273 /* dev is zeroed by alloc_etherdev_mq() */
8274 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8275
8276 if (!dev)
8277 return -ENOMEM;
8278
8279 rc = bnx2_init_board(pdev, dev);
8280 if (rc < 0) {
8281 free_netdev(dev);
8282 return rc;
8283 }
8284
8285 dev->netdev_ops = &bnx2_netdev_ops;
8286 dev->watchdog_timeo = TX_TIMEOUT;
8287 dev->ethtool_ops = &bnx2_ethtool_ops;
8288
8289 bp = netdev_priv(dev);
8290
8291 pci_set_drvdata(pdev, dev);
8292
8293 rc = bnx2_request_firmware(bp);
8294 if (rc)
8295 goto error;
8296
8297 memcpy(dev->dev_addr, bp->mac_addr, 6);
8298 memcpy(dev->perm_addr, bp->mac_addr, 6);
8299
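/* All chips support IPv4 checksum offload, SG, and TSO; the 5709
 * additionally supports IPv6 checksum offload and TSO6.
 */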
8300 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
8301 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8302 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8303 dev->features |= NETIF_F_IPV6_CSUM;
8304 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8305 }
8306 #ifdef BCM_VLAN
8307 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8308 #endif
8309 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8310 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8311 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8312 dev->features |= NETIF_F_TSO6;
8313 vlan_features_add(dev, NETIF_F_TSO6);
8314 }
8315 if ((rc = register_netdev(dev))) {
8316 dev_err(&pdev->dev, "Cannot register net device\n");
8317 goto error;
8318 }
8319
8320 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8321 board_info[ent->driver_data].name,
8322 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8323 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8324 bnx2_bus_string(bp, str),
8325 dev->base_addr,
8326 bp->pdev->irq, dev->dev_addr);
8327
8328 return 0;
8329
8330 error:
8331 if (bp->mips_firmware)
8332 release_firmware(bp->mips_firmware);
8333 if (bp->rv2p_firmware)
8334 release_firmware(bp->rv2p_firmware);
8335
8336 if (bp->regview)
8337 iounmap(bp->regview);
8338 pci_release_regions(pdev);
8339 pci_disable_device(pdev);
8340 pci_set_drvdata(pdev, NULL);
8341 free_netdev(dev);
8342 return rc;
8343 }
8344
8345 static void __devexit
8346 bnx2_remove_one(struct pci_dev *pdev)
8347 {
8348 struct net_device *dev = pci_get_drvdata(pdev);
8349 struct bnx2 *bp = netdev_priv(dev);
8350
8351 flush_scheduled_work();
8352
8353 unregister_netdev(dev);
8354
8355 if (bp->mips_firmware)
8356 release_firmware(bp->mips_firmware);
8357 if (bp->rv2p_firmware)
8358 release_firmware(bp->rv2p_firmware);
8359
8360 if (bp->regview)
8361 iounmap(bp->regview);
8362
8363 kfree(bp->temp_stats_blk);
8364
8365 free_netdev(dev);
8366 pci_release_regions(pdev);
8367 pci_disable_device(pdev);
8368 pci_set_drvdata(pdev, NULL);
8369 }
8370
8371 static int
8372 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8373 {
8374 struct net_device *dev = pci_get_drvdata(pdev);
8375 struct bnx2 *bp = netdev_priv(dev);
8376
8377 /* PCI register 4 needs to be saved whether netif_running() or not.
8378 * MSI address and data need to be saved if using MSI and
8379 * netif_running().
8380 */
8381 pci_save_state(pdev);
8382 if (!netif_running(dev))
8383 return 0;
8384
8385 flush_scheduled_work();
8386 bnx2_netif_stop(bp, true);
8387 netif_device_detach(dev);
8388 del_timer_sync(&bp->timer);
8389 bnx2_shutdown_chip(bp);
8390 bnx2_free_skbs(bp);
8391 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8392 return 0;
8393 }
8394
8395 static int
8396 bnx2_resume(struct pci_dev *pdev)
8397 {
8398 struct net_device *dev = pci_get_drvdata(pdev);
8399 struct bnx2 *bp = netdev_priv(dev);
8400
8401 pci_restore_state(pdev);
8402 if (!netif_running(dev))
8403 return 0;
8404
8405 bnx2_set_power_state(bp, PCI_D0);
8406 netif_device_attach(dev);
8407 bnx2_init_nic(bp, 1);
8408 bnx2_netif_start(bp, true);
8409 return 0;
8410 }
8411
8412 /**
8413 * bnx2_io_error_detected - called when PCI error is detected
8414 * @pdev: Pointer to PCI device
8415 * @state: The current pci connection state
8416 *
8417 * This function is called after a PCI bus error affecting
8418 * this device has been detected.
8419 */
8420 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8421 pci_channel_state_t state)
8422 {
8423 struct net_device *dev = pci_get_drvdata(pdev);
8424 struct bnx2 *bp = netdev_priv(dev);
8425
8426 rtnl_lock();
8427 netif_device_detach(dev);
8428
8429 if (state == pci_channel_io_perm_failure) {
8430 rtnl_unlock();
8431 return PCI_ERS_RESULT_DISCONNECT;
8432 }
8433
8434 if (netif_running(dev)) {
8435 bnx2_netif_stop(bp, true);
8436 del_timer_sync(&bp->timer);
8437 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8438 }
8439
8440 pci_disable_device(pdev);
8441 rtnl_unlock();
8442
8443 /* Request a slot reset. */
8444 return PCI_ERS_RESULT_NEED_RESET;
8445 }
8446
8447 /**
8448 * bnx2_io_slot_reset - called after the pci bus has been reset.
8449 * @pdev: Pointer to PCI device
8450 *
8451 * Restart the card from scratch, as if from a cold boot.
8452 */
8453 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8454 {
8455 struct net_device *dev = pci_get_drvdata(pdev);
8456 struct bnx2 *bp = netdev_priv(dev);
8457
8458 rtnl_lock();
8459 if (pci_enable_device(pdev)) {
8460 dev_err(&pdev->dev,
8461 "Cannot re-enable PCI device after reset\n");
8462 rtnl_unlock();
8463 return PCI_ERS_RESULT_DISCONNECT;
8464 }
8465 pci_set_master(pdev);
8466 pci_restore_state(pdev);
8467 pci_save_state(pdev);
8468
8469 if (netif_running(dev)) {
8470 bnx2_set_power_state(bp, PCI_D0);
8471 bnx2_init_nic(bp, 1);
8472 }
8473
8474 rtnl_unlock();
8475 return PCI_ERS_RESULT_RECOVERED;
8476 }
8477
8478 /**
8479 * bnx2_io_resume - called when traffic can start flowing again.
8480 * @pdev: Pointer to PCI device
8481 *
8482 * This callback is called when the error recovery driver tells us that
8483 * it's OK to resume normal operation.
8484 */
8485 static void bnx2_io_resume(struct pci_dev *pdev)
8486 {
8487 struct net_device *dev = pci_get_drvdata(pdev);
8488 struct bnx2 *bp = netdev_priv(dev);
8489
8490 rtnl_lock();
8491 if (netif_running(dev))
8492 bnx2_netif_start(bp, true);
8493
8494 netif_device_attach(dev);
8495 rtnl_unlock();
8496 }
8497
8498 static struct pci_error_handlers bnx2_err_handler = {
8499 .error_detected = bnx2_io_error_detected,
8500 .slot_reset = bnx2_io_slot_reset,
8501 .resume = bnx2_io_resume,
8502 };
8503
8504 static struct pci_driver bnx2_pci_driver = {
8505 .name = DRV_MODULE_NAME,
8506 .id_table = bnx2_pci_tbl,
8507 .probe = bnx2_init_one,
8508 .remove = __devexit_p(bnx2_remove_one),
8509 .suspend = bnx2_suspend,
8510 .resume = bnx2_resume,
8511 .err_handler = &bnx2_err_handler,
8512 };
8513
8514 static int __init bnx2_init(void)
8515 {
8516 return pci_register_driver(&bnx2_pci_driver);
8517 }
8518
8519 static void __exit bnx2_cleanup(void)
8520 {
8521 pci_unregister_driver(&bnx2_pci_driver);
8522 }
8523
8524 module_init(bnx2_init);
8525 module_exit(bnx2_cleanup);
8526