1 /* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52 #include <linux/aer.h>
53
54 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60
61 #define DRV_MODULE_NAME "bnx2"
62 #define DRV_MODULE_VERSION "2.0.17"
63 #define DRV_MODULE_RELDATE "July 18, 2010"
64 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
65 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
67 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
68 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT (5*HZ)
74
75 static char version[] __devinitdata =
76 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
88 static int disable_msi = 0;
89
90 module_param(disable_msi, int, 0);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
93 typedef enum {
94 BCM5706 = 0,
95 NC370T,
96 NC370I,
97 BCM5706S,
98 NC370F,
99 BCM5708,
100 BCM5708S,
101 BCM5709,
102 BCM5709S,
103 BCM5716,
104 BCM5716S,
105 } board_t;
106
107 /* indexed by board_t, above */
108 static struct {
109 char *name;
110 } board_info[] __devinitdata = {
111 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
112 { "HP NC370T Multifunction Gigabit Server Adapter" },
113 { "HP NC370i Multifunction Gigabit Server Adapter" },
114 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
115 { "HP NC370F Multifunction Gigabit Server Adapter" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
117 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
118 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
119 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
120 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
121 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
122 };
123
124 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
143 { PCI_VENDOR_ID_BROADCOM, 0x163b,
144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
145 { PCI_VENDOR_ID_BROADCOM, 0x163c,
146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
147 { 0, }
148 };
149
150 static const struct flash_spec flash_table[] =
151 {
152 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
153 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
154 /* Slow EEPROM */
155 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
156 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
157 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 "EEPROM - slow"},
159 /* Expansion entry 0001 */
160 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 "Entry 0001"},
164 /* Saifun SA25F010 (non-buffered flash) */
165 /* strap, cfg1, & write1 need updates */
166 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
168 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
169 "Non-buffered flash (128kB)"},
170 /* Saifun SA25F020 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
175 "Non-buffered flash (256kB)"},
176 /* Expansion entry 0100 */
177 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
178 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 "Entry 0100"},
181 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
182 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
184 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 185 		 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
186 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
187 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
189 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 190 		 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
191 /* Saifun SA25F005 (non-buffered flash) */
192 /* strap, cfg1, & write1 need updates */
193 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
194 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
196 "Non-buffered flash (64kB)"},
197 /* Fast EEPROM */
198 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
199 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
200 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 "EEPROM - fast"},
202 /* Expansion entry 1001 */
203 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
204 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
205 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1001"},
207 /* Expansion entry 1010 */
208 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
209 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
210 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 "Entry 1010"},
212 /* ATMEL AT45DB011B (buffered flash) */
213 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
214 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
215 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
216 "Buffered flash (128kB)"},
217 /* Expansion entry 1100 */
218 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
219 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
220 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 "Entry 1100"},
222 /* Expansion entry 1101 */
223 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
224 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
225 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 "Entry 1101"},
 227 	/* Atmel Expansion entry 1110 */
228 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
229 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
230 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
231 "Entry 1110 (Atmel)"},
232 /* ATMEL AT45DB021B (buffered flash) */
233 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
234 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
235 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
236 "Buffered flash (256kB)"},
237 };
238
239 static const struct flash_spec flash_5709 = {
240 .flags = BNX2_NV_BUFFERED,
241 .page_bits = BCM5709_FLASH_PAGE_BITS,
242 .page_size = BCM5709_FLASH_PAGE_SIZE,
243 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
244 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
245 .name = "5709 Buffered flash (256kB)",
246 };
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
250 static void bnx2_init_napi(struct bnx2 *bp);
251 static void bnx2_del_napi(struct bnx2 *bp);
252
253 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
254 {
255 u32 diff;
256
257 /* Tell compiler to fetch tx_prod and tx_cons from memory. */
258 barrier();
259
 260 	/* The ring uses 256 indices for 255 entries; one of them
261 * needs to be skipped.
262 */
263 diff = txr->tx_prod - txr->tx_cons;
264 if (unlikely(diff >= TX_DESC_CNT)) {
265 diff &= 0xffff;
266 if (diff == TX_DESC_CNT)
267 diff = MAX_TX_DESC_CNT;
268 }
269 return (bp->tx_ring_size - diff);
270 }
271
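/* Indirectly read a device register: the offset is written to the PCICFG
 * register window address and the data is read back through the window.
 * indirect_lock serializes the two-step access.
 */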
272 static u32
273 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
274 {
275 u32 val;
276
277 spin_lock_bh(&bp->indirect_lock);
278 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
279 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
280 spin_unlock_bh(&bp->indirect_lock);
281 return val;
282 }
283
284 static void
285 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
286 {
287 spin_lock_bh(&bp->indirect_lock);
288 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
289 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
290 spin_unlock_bh(&bp->indirect_lock);
291 }
292
293 static void
294 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
295 {
296 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
297 }
298
299 static u32
300 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301 {
302 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
303 }
304
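/* Write a value into on-chip context memory.  The 5709 goes through the
 * CTX_CTRL write-request interface and polls for completion; earlier
 * chips write directly through CTX_DATA_ADR/CTX_DATA.
 */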
305 static void
306 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 {
308 offset += cid_addr;
309 spin_lock_bh(&bp->indirect_lock);
310 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
311 int i;
312
313 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
314 REG_WR(bp, BNX2_CTX_CTX_CTRL,
315 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
316 for (i = 0; i < 5; i++) {
317 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
318 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
319 break;
320 udelay(5);
321 }
322 } else {
323 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
324 REG_WR(bp, BNX2_CTX_DATA, val);
325 }
326 spin_unlock_bh(&bp->indirect_lock);
327 }
328
329 #ifdef BCM_CNIC
330 static int
331 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
332 {
333 struct bnx2 *bp = netdev_priv(dev);
334 struct drv_ctl_io *io = &info->data.io;
335
336 switch (info->cmd) {
337 case DRV_CTL_IO_WR_CMD:
338 bnx2_reg_wr_ind(bp, io->offset, io->data);
339 break;
340 case DRV_CTL_IO_RD_CMD:
341 io->data = bnx2_reg_rd_ind(bp, io->offset);
342 break;
343 case DRV_CTL_CTX_WR_CMD:
344 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
345 break;
346 default:
347 return -EINVAL;
348 }
349 return 0;
350 }
351
352 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
353 {
354 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
355 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
356 int sb_id;
357
358 if (bp->flags & BNX2_FLAG_USING_MSIX) {
359 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
360 bnapi->cnic_present = 0;
361 sb_id = bp->irq_nvecs;
362 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
363 } else {
364 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
365 bnapi->cnic_tag = bnapi->last_status_idx;
366 bnapi->cnic_present = 1;
367 sb_id = 0;
368 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
369 }
370
371 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
372 cp->irq_arr[0].status_blk = (void *)
373 ((unsigned long) bnapi->status_blk.msi +
374 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
375 cp->irq_arr[0].status_blk_num = sb_id;
376 cp->num_irq = 1;
377 }
378
379 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
380 void *data)
381 {
382 struct bnx2 *bp = netdev_priv(dev);
383 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
384
385 if (ops == NULL)
386 return -EINVAL;
387
388 if (cp->drv_state & CNIC_DRV_STATE_REGD)
389 return -EBUSY;
390
391 bp->cnic_data = data;
392 rcu_assign_pointer(bp->cnic_ops, ops);
393
394 cp->num_irq = 0;
395 cp->drv_state = CNIC_DRV_STATE_REGD;
396
397 bnx2_setup_cnic_irq_info(bp);
398
399 return 0;
400 }
401
402 static int bnx2_unregister_cnic(struct net_device *dev)
403 {
404 struct bnx2 *bp = netdev_priv(dev);
405 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
406 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
407
408 mutex_lock(&bp->cnic_lock);
409 cp->drv_state = 0;
410 bnapi->cnic_present = 0;
411 rcu_assign_pointer(bp->cnic_ops, NULL);
412 mutex_unlock(&bp->cnic_lock);
413 synchronize_rcu();
414 return 0;
415 }
416
417 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
418 {
419 struct bnx2 *bp = netdev_priv(dev);
420 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
421
422 cp->drv_owner = THIS_MODULE;
423 cp->chip_id = bp->chip_id;
424 cp->pdev = bp->pdev;
425 cp->io_base = bp->regview;
426 cp->drv_ctl = bnx2_drv_ctl;
427 cp->drv_register_cnic = bnx2_register_cnic;
428 cp->drv_unregister_cnic = bnx2_unregister_cnic;
429
430 return cp;
431 }
432 EXPORT_SYMBOL(bnx2_cnic_probe);
433
434 static void
435 bnx2_cnic_stop(struct bnx2 *bp)
436 {
437 struct cnic_ops *c_ops;
438 struct cnic_ctl_info info;
439
440 mutex_lock(&bp->cnic_lock);
441 c_ops = bp->cnic_ops;
442 if (c_ops) {
443 info.cmd = CNIC_CTL_STOP_CMD;
444 c_ops->cnic_ctl(bp->cnic_data, &info);
445 }
446 mutex_unlock(&bp->cnic_lock);
447 }
448
449 static void
450 bnx2_cnic_start(struct bnx2 *bp)
451 {
452 struct cnic_ops *c_ops;
453 struct cnic_ctl_info info;
454
455 mutex_lock(&bp->cnic_lock);
456 c_ops = bp->cnic_ops;
457 if (c_ops) {
458 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
459 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
460
461 bnapi->cnic_tag = bnapi->last_status_idx;
462 }
463 info.cmd = CNIC_CTL_START_CMD;
464 c_ops->cnic_ctl(bp->cnic_data, &info);
465 }
466 mutex_unlock(&bp->cnic_lock);
467 }
468
469 #else
470
471 static void
472 bnx2_cnic_stop(struct bnx2 *bp)
473 {
474 }
475
476 static void
477 bnx2_cnic_start(struct bnx2 *bp)
478 {
479 }
480
481 #endif
482
483 static int
484 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
485 {
486 u32 val1;
487 int i, ret;
488
489 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
490 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
491 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
492
493 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
494 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
495
496 udelay(40);
497 }
498
499 val1 = (bp->phy_addr << 21) | (reg << 16) |
500 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
501 BNX2_EMAC_MDIO_COMM_START_BUSY;
502 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
503
504 for (i = 0; i < 50; i++) {
505 udelay(10);
506
507 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
508 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
509 udelay(5);
510
511 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
512 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
513
514 break;
515 }
516 }
517
518 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
519 *val = 0x0;
520 ret = -EBUSY;
521 }
522 else {
523 *val = val1;
524 ret = 0;
525 }
526
527 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
528 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
529 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
530
531 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
532 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
533
534 udelay(40);
535 }
536
537 return ret;
538 }
539
540 static int
541 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
542 {
543 u32 val1;
544 int i, ret;
545
546 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
547 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
548 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
549
550 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
551 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
552
553 udelay(40);
554 }
555
556 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
557 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
558 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
559 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
560
561 for (i = 0; i < 50; i++) {
562 udelay(10);
563
564 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
565 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
566 udelay(5);
567 break;
568 }
569 }
570
571 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
572 ret = -EBUSY;
573 else
574 ret = 0;
575
576 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
577 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
578 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
579
580 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
581 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
582
583 udelay(40);
584 }
585
586 return ret;
587 }
588
589 static void
590 bnx2_disable_int(struct bnx2 *bp)
591 {
592 int i;
593 struct bnx2_napi *bnapi;
594
595 for (i = 0; i < bp->irq_nvecs; i++) {
596 bnapi = &bp->bnx2_napi[i];
597 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
598 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
599 }
600 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
601 }
602
603 static void
604 bnx2_enable_int(struct bnx2 *bp)
605 {
606 int i;
607 struct bnx2_napi *bnapi;
608
609 for (i = 0; i < bp->irq_nvecs; i++) {
610 bnapi = &bp->bnx2_napi[i];
611
612 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
613 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
614 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
615 bnapi->last_status_idx);
616
617 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
618 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
619 bnapi->last_status_idx);
620 }
621 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
622 }
623
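/* Mask all interrupt vectors and wait for any handlers already running
 * to finish.  intr_sem is raised first so that interrupts stay off until
 * bnx2_netif_start() drops the count back to zero.
 */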
624 static void
625 bnx2_disable_int_sync(struct bnx2 *bp)
626 {
627 int i;
628
629 atomic_inc(&bp->intr_sem);
630 if (!netif_running(bp->dev))
631 return;
632
633 bnx2_disable_int(bp);
634 for (i = 0; i < bp->irq_nvecs; i++)
635 synchronize_irq(bp->irq_tbl[i].vector);
636 }
637
638 static void
639 bnx2_napi_disable(struct bnx2 *bp)
640 {
641 int i;
642
643 for (i = 0; i < bp->irq_nvecs; i++)
644 napi_disable(&bp->bnx2_napi[i].napi);
645 }
646
647 static void
648 bnx2_napi_enable(struct bnx2 *bp)
649 {
650 int i;
651
652 for (i = 0; i < bp->irq_nvecs; i++)
653 napi_enable(&bp->bnx2_napi[i].napi);
654 }
655
656 static void
657 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
658 {
659 if (stop_cnic)
660 bnx2_cnic_stop(bp);
661 if (netif_running(bp->dev)) {
662 bnx2_napi_disable(bp);
663 netif_tx_disable(bp->dev);
664 }
665 bnx2_disable_int_sync(bp);
666 netif_carrier_off(bp->dev); /* prevent tx timeout */
667 }
668
669 static void
670 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
671 {
672 if (atomic_dec_and_test(&bp->intr_sem)) {
673 if (netif_running(bp->dev)) {
674 netif_tx_wake_all_queues(bp->dev);
675 spin_lock_bh(&bp->phy_lock);
676 if (bp->link_up)
677 netif_carrier_on(bp->dev);
678 spin_unlock_bh(&bp->phy_lock);
679 bnx2_napi_enable(bp);
680 bnx2_enable_int(bp);
681 if (start_cnic)
682 bnx2_cnic_start(bp);
683 }
684 }
685 }
686
687 static void
688 bnx2_free_tx_mem(struct bnx2 *bp)
689 {
690 int i;
691
692 for (i = 0; i < bp->num_tx_rings; i++) {
693 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
694 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
695
696 if (txr->tx_desc_ring) {
697 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
698 txr->tx_desc_ring,
699 txr->tx_desc_mapping);
700 txr->tx_desc_ring = NULL;
701 }
702 kfree(txr->tx_buf_ring);
703 txr->tx_buf_ring = NULL;
704 }
705 }
706
707 static void
708 bnx2_free_rx_mem(struct bnx2 *bp)
709 {
710 int i;
711
712 for (i = 0; i < bp->num_rx_rings; i++) {
713 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
714 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
715 int j;
716
717 for (j = 0; j < bp->rx_max_ring; j++) {
718 if (rxr->rx_desc_ring[j])
719 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
720 rxr->rx_desc_ring[j],
721 rxr->rx_desc_mapping[j]);
722 rxr->rx_desc_ring[j] = NULL;
723 }
724 vfree(rxr->rx_buf_ring);
725 rxr->rx_buf_ring = NULL;
726
727 for (j = 0; j < bp->rx_max_pg_ring; j++) {
728 if (rxr->rx_pg_desc_ring[j])
729 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
730 rxr->rx_pg_desc_ring[j],
731 rxr->rx_pg_desc_mapping[j]);
732 rxr->rx_pg_desc_ring[j] = NULL;
733 }
734 vfree(rxr->rx_pg_ring);
735 rxr->rx_pg_ring = NULL;
736 }
737 }
738
739 static int
740 bnx2_alloc_tx_mem(struct bnx2 *bp)
741 {
742 int i;
743
744 for (i = 0; i < bp->num_tx_rings; i++) {
745 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
746 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
747
748 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
749 if (txr->tx_buf_ring == NULL)
750 return -ENOMEM;
751
752 txr->tx_desc_ring =
753 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
754 &txr->tx_desc_mapping, GFP_KERNEL);
755 if (txr->tx_desc_ring == NULL)
756 return -ENOMEM;
757 }
758 return 0;
759 }
760
761 static int
762 bnx2_alloc_rx_mem(struct bnx2 *bp)
763 {
764 int i;
765
766 for (i = 0; i < bp->num_rx_rings; i++) {
767 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
768 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
769 int j;
770
771 rxr->rx_buf_ring =
772 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
773 if (rxr->rx_buf_ring == NULL)
774 return -ENOMEM;
775
776 memset(rxr->rx_buf_ring, 0,
777 SW_RXBD_RING_SIZE * bp->rx_max_ring);
778
779 for (j = 0; j < bp->rx_max_ring; j++) {
780 rxr->rx_desc_ring[j] =
781 dma_alloc_coherent(&bp->pdev->dev,
782 RXBD_RING_SIZE,
783 &rxr->rx_desc_mapping[j],
784 GFP_KERNEL);
785 if (rxr->rx_desc_ring[j] == NULL)
786 return -ENOMEM;
787
788 }
789
790 if (bp->rx_pg_ring_size) {
791 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
792 bp->rx_max_pg_ring);
793 if (rxr->rx_pg_ring == NULL)
794 return -ENOMEM;
795
796 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
797 bp->rx_max_pg_ring);
798 }
799
800 for (j = 0; j < bp->rx_max_pg_ring; j++) {
801 rxr->rx_pg_desc_ring[j] =
802 dma_alloc_coherent(&bp->pdev->dev,
803 RXBD_RING_SIZE,
804 &rxr->rx_pg_desc_mapping[j],
805 GFP_KERNEL);
806 if (rxr->rx_pg_desc_ring[j] == NULL)
807 return -ENOMEM;
808
809 }
810 }
811 return 0;
812 }
813
814 static void
815 bnx2_free_mem(struct bnx2 *bp)
816 {
817 int i;
818 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
819
820 bnx2_free_tx_mem(bp);
821 bnx2_free_rx_mem(bp);
822
823 for (i = 0; i < bp->ctx_pages; i++) {
824 if (bp->ctx_blk[i]) {
825 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
826 bp->ctx_blk[i],
827 bp->ctx_blk_mapping[i]);
828 bp->ctx_blk[i] = NULL;
829 }
830 }
831 if (bnapi->status_blk.msi) {
832 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
833 bnapi->status_blk.msi,
834 bp->status_blk_mapping);
835 bnapi->status_blk.msi = NULL;
836 bp->stats_blk = NULL;
837 }
838 }
839
840 static int
841 bnx2_alloc_mem(struct bnx2 *bp)
842 {
843 int i, status_blk_size, err;
844 struct bnx2_napi *bnapi;
845 void *status_blk;
846
847 /* Combine status and statistics blocks into one allocation. */
848 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
849 if (bp->flags & BNX2_FLAG_MSIX_CAP)
850 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
851 BNX2_SBLK_MSIX_ALIGN_SIZE);
852 bp->status_stats_size = status_blk_size +
853 sizeof(struct statistics_block);
854
855 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
856 &bp->status_blk_mapping, GFP_KERNEL);
857 if (status_blk == NULL)
858 goto alloc_mem_err;
859
860 memset(status_blk, 0, bp->status_stats_size);
861
862 bnapi = &bp->bnx2_napi[0];
863 bnapi->status_blk.msi = status_blk;
864 bnapi->hw_tx_cons_ptr =
865 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
866 bnapi->hw_rx_cons_ptr =
867 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
868 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
869 for (i = 1; i < bp->irq_nvecs; i++) {
870 struct status_block_msix *sblk;
871
872 bnapi = &bp->bnx2_napi[i];
873
874 sblk = (void *) (status_blk +
875 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
876 bnapi->status_blk.msix = sblk;
877 bnapi->hw_tx_cons_ptr =
878 &sblk->status_tx_quick_consumer_index;
879 bnapi->hw_rx_cons_ptr =
880 &sblk->status_rx_quick_consumer_index;
881 bnapi->int_num = i << 24;
882 }
883 }
884
885 bp->stats_blk = status_blk + status_blk_size;
886
887 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
888
889 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
890 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
891 if (bp->ctx_pages == 0)
892 bp->ctx_pages = 1;
893 for (i = 0; i < bp->ctx_pages; i++) {
894 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
895 BCM_PAGE_SIZE,
896 &bp->ctx_blk_mapping[i],
897 GFP_KERNEL);
898 if (bp->ctx_blk[i] == NULL)
899 goto alloc_mem_err;
900 }
901 }
902
903 err = bnx2_alloc_rx_mem(bp);
904 if (err)
905 goto alloc_mem_err;
906
907 err = bnx2_alloc_tx_mem(bp);
908 if (err)
909 goto alloc_mem_err;
910
911 return 0;
912
913 alloc_mem_err:
914 bnx2_free_mem(bp);
915 return -ENOMEM;
916 }
917
918 static void
919 bnx2_report_fw_link(struct bnx2 *bp)
920 {
921 u32 fw_link_status = 0;
922
923 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
924 return;
925
926 if (bp->link_up) {
927 u32 bmsr;
928
929 switch (bp->line_speed) {
930 case SPEED_10:
931 if (bp->duplex == DUPLEX_HALF)
932 fw_link_status = BNX2_LINK_STATUS_10HALF;
933 else
934 fw_link_status = BNX2_LINK_STATUS_10FULL;
935 break;
936 case SPEED_100:
937 if (bp->duplex == DUPLEX_HALF)
938 fw_link_status = BNX2_LINK_STATUS_100HALF;
939 else
940 fw_link_status = BNX2_LINK_STATUS_100FULL;
941 break;
942 case SPEED_1000:
943 if (bp->duplex == DUPLEX_HALF)
944 fw_link_status = BNX2_LINK_STATUS_1000HALF;
945 else
946 fw_link_status = BNX2_LINK_STATUS_1000FULL;
947 break;
948 case SPEED_2500:
949 if (bp->duplex == DUPLEX_HALF)
950 fw_link_status = BNX2_LINK_STATUS_2500HALF;
951 else
952 fw_link_status = BNX2_LINK_STATUS_2500FULL;
953 break;
954 }
955
956 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
957
958 if (bp->autoneg) {
959 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
960
961 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
963
964 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
965 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
966 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
967 else
968 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
969 }
970 }
971 else
972 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
973
974 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
975 }
976
977 static char *
978 bnx2_xceiver_str(struct bnx2 *bp)
979 {
980 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
981 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
982 "Copper"));
983 }
984
985 static void
986 bnx2_report_link(struct bnx2 *bp)
987 {
988 if (bp->link_up) {
989 netif_carrier_on(bp->dev);
990 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
991 bnx2_xceiver_str(bp),
992 bp->line_speed,
993 bp->duplex == DUPLEX_FULL ? "full" : "half");
994
995 if (bp->flow_ctrl) {
996 if (bp->flow_ctrl & FLOW_CTRL_RX) {
997 pr_cont(", receive ");
998 if (bp->flow_ctrl & FLOW_CTRL_TX)
999 pr_cont("& transmit ");
1000 }
1001 else {
1002 pr_cont(", transmit ");
1003 }
1004 pr_cont("flow control ON");
1005 }
1006 pr_cont("\n");
1007 } else {
1008 netif_carrier_off(bp->dev);
1009 netdev_err(bp->dev, "NIC %s Link is Down\n",
1010 bnx2_xceiver_str(bp));
1011 }
1012
1013 bnx2_report_fw_link(bp);
1014 }
1015
1016 static void
1017 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1018 {
1019 u32 local_adv, remote_adv;
1020
1021 bp->flow_ctrl = 0;
1022 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1023 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1024
1025 if (bp->duplex == DUPLEX_FULL) {
1026 bp->flow_ctrl = bp->req_flow_ctrl;
1027 }
1028 return;
1029 }
1030
1031 if (bp->duplex != DUPLEX_FULL) {
1032 return;
1033 }
1034
1035 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1036 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1037 u32 val;
1038
1039 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1040 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1041 bp->flow_ctrl |= FLOW_CTRL_TX;
1042 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1043 bp->flow_ctrl |= FLOW_CTRL_RX;
1044 return;
1045 }
1046
1047 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1048 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1049
1050 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1051 u32 new_local_adv = 0;
1052 u32 new_remote_adv = 0;
1053
1054 if (local_adv & ADVERTISE_1000XPAUSE)
1055 new_local_adv |= ADVERTISE_PAUSE_CAP;
1056 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1057 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1058 if (remote_adv & ADVERTISE_1000XPAUSE)
1059 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1060 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1061 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1062
1063 local_adv = new_local_adv;
1064 remote_adv = new_remote_adv;
1065 }
1066
1067 /* See Table 28B-3 of 802.3ab-1999 spec. */
1068 if (local_adv & ADVERTISE_PAUSE_CAP) {
1069 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1070 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1071 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1072 }
1073 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1074 bp->flow_ctrl = FLOW_CTRL_RX;
1075 }
1076 }
1077 else {
1078 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1079 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1080 }
1081 }
1082 }
1083 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1084 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1085 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1086
1087 bp->flow_ctrl = FLOW_CTRL_TX;
1088 }
1089 }
1090 }
1091
1092 static int
1093 bnx2_5709s_linkup(struct bnx2 *bp)
1094 {
1095 u32 val, speed;
1096
1097 bp->link_up = 1;
1098
1099 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1100 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1101 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1102
1103 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1104 bp->line_speed = bp->req_line_speed;
1105 bp->duplex = bp->req_duplex;
1106 return 0;
1107 }
1108 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1109 switch (speed) {
1110 case MII_BNX2_GP_TOP_AN_SPEED_10:
1111 bp->line_speed = SPEED_10;
1112 break;
1113 case MII_BNX2_GP_TOP_AN_SPEED_100:
1114 bp->line_speed = SPEED_100;
1115 break;
1116 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1117 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1118 bp->line_speed = SPEED_1000;
1119 break;
1120 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1121 bp->line_speed = SPEED_2500;
1122 break;
1123 }
1124 if (val & MII_BNX2_GP_TOP_AN_FD)
1125 bp->duplex = DUPLEX_FULL;
1126 else
1127 bp->duplex = DUPLEX_HALF;
1128 return 0;
1129 }
1130
1131 static int
1132 bnx2_5708s_linkup(struct bnx2 *bp)
1133 {
1134 u32 val;
1135
1136 bp->link_up = 1;
1137 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1138 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1139 case BCM5708S_1000X_STAT1_SPEED_10:
1140 bp->line_speed = SPEED_10;
1141 break;
1142 case BCM5708S_1000X_STAT1_SPEED_100:
1143 bp->line_speed = SPEED_100;
1144 break;
1145 case BCM5708S_1000X_STAT1_SPEED_1G:
1146 bp->line_speed = SPEED_1000;
1147 break;
1148 case BCM5708S_1000X_STAT1_SPEED_2G5:
1149 bp->line_speed = SPEED_2500;
1150 break;
1151 }
1152 if (val & BCM5708S_1000X_STAT1_FD)
1153 bp->duplex = DUPLEX_FULL;
1154 else
1155 bp->duplex = DUPLEX_HALF;
1156
1157 return 0;
1158 }
1159
1160 static int
1161 bnx2_5706s_linkup(struct bnx2 *bp)
1162 {
1163 u32 bmcr, local_adv, remote_adv, common;
1164
1165 bp->link_up = 1;
1166 bp->line_speed = SPEED_1000;
1167
1168 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1169 if (bmcr & BMCR_FULLDPLX) {
1170 bp->duplex = DUPLEX_FULL;
1171 }
1172 else {
1173 bp->duplex = DUPLEX_HALF;
1174 }
1175
1176 if (!(bmcr & BMCR_ANENABLE)) {
1177 return 0;
1178 }
1179
1180 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1181 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1182
1183 common = local_adv & remote_adv;
1184 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1185
1186 if (common & ADVERTISE_1000XFULL) {
1187 bp->duplex = DUPLEX_FULL;
1188 }
1189 else {
1190 bp->duplex = DUPLEX_HALF;
1191 }
1192 }
1193
1194 return 0;
1195 }
1196
1197 static int
1198 bnx2_copper_linkup(struct bnx2 *bp)
1199 {
1200 u32 bmcr;
1201
1202 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203 if (bmcr & BMCR_ANENABLE) {
1204 u32 local_adv, remote_adv, common;
1205
1206 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1208
1209 common = local_adv & (remote_adv >> 2);
1210 if (common & ADVERTISE_1000FULL) {
1211 bp->line_speed = SPEED_1000;
1212 bp->duplex = DUPLEX_FULL;
1213 }
1214 else if (common & ADVERTISE_1000HALF) {
1215 bp->line_speed = SPEED_1000;
1216 bp->duplex = DUPLEX_HALF;
1217 }
1218 else {
1219 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1221
1222 common = local_adv & remote_adv;
1223 if (common & ADVERTISE_100FULL) {
1224 bp->line_speed = SPEED_100;
1225 bp->duplex = DUPLEX_FULL;
1226 }
1227 else if (common & ADVERTISE_100HALF) {
1228 bp->line_speed = SPEED_100;
1229 bp->duplex = DUPLEX_HALF;
1230 }
1231 else if (common & ADVERTISE_10FULL) {
1232 bp->line_speed = SPEED_10;
1233 bp->duplex = DUPLEX_FULL;
1234 }
1235 else if (common & ADVERTISE_10HALF) {
1236 bp->line_speed = SPEED_10;
1237 bp->duplex = DUPLEX_HALF;
1238 }
1239 else {
1240 bp->line_speed = 0;
1241 bp->link_up = 0;
1242 }
1243 }
1244 }
1245 else {
1246 if (bmcr & BMCR_SPEED100) {
1247 bp->line_speed = SPEED_100;
1248 }
1249 else {
1250 bp->line_speed = SPEED_10;
1251 }
1252 if (bmcr & BMCR_FULLDPLX) {
1253 bp->duplex = DUPLEX_FULL;
1254 }
1255 else {
1256 bp->duplex = DUPLEX_HALF;
1257 }
1258 }
1259
1260 return 0;
1261 }
1262
1263 static void
1264 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1265 {
1266 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1267
1268 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1269 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1270 val |= 0x02 << 8;
1271
1272 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1273 u32 lo_water, hi_water;
1274
1275 if (bp->flow_ctrl & FLOW_CTRL_TX)
1276 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1277 else
1278 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1279 if (lo_water >= bp->rx_ring_size)
1280 lo_water = 0;
1281
1282 hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1283
1284 if (hi_water <= lo_water)
1285 lo_water = 0;
1286
1287 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1288 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1289
1290 if (hi_water > 0xf)
1291 hi_water = 0xf;
1292 else if (hi_water == 0)
1293 lo_water = 0;
1294 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1295 }
1296 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1297 }
1298
1299 static void
1300 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1301 {
1302 int i;
1303 u32 cid;
1304
1305 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1306 if (i == 1)
1307 cid = RX_RSS_CID;
1308 bnx2_init_rx_context(bp, cid);
1309 }
1310 }
1311
1312 static void
1313 bnx2_set_mac_link(struct bnx2 *bp)
1314 {
1315 u32 val;
1316
1317 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1318 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1319 (bp->duplex == DUPLEX_HALF)) {
1320 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1321 }
1322
1323 /* Configure the EMAC mode register. */
1324 val = REG_RD(bp, BNX2_EMAC_MODE);
1325
1326 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1327 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1328 BNX2_EMAC_MODE_25G_MODE);
1329
1330 if (bp->link_up) {
1331 switch (bp->line_speed) {
1332 case SPEED_10:
1333 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1334 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1335 break;
1336 }
1337 /* fall through */
1338 case SPEED_100:
1339 val |= BNX2_EMAC_MODE_PORT_MII;
1340 break;
1341 case SPEED_2500:
1342 val |= BNX2_EMAC_MODE_25G_MODE;
1343 /* fall through */
1344 case SPEED_1000:
1345 val |= BNX2_EMAC_MODE_PORT_GMII;
1346 break;
1347 }
1348 }
1349 else {
1350 val |= BNX2_EMAC_MODE_PORT_GMII;
1351 }
1352
1353 /* Set the MAC to operate in the appropriate duplex mode. */
1354 if (bp->duplex == DUPLEX_HALF)
1355 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1356 REG_WR(bp, BNX2_EMAC_MODE, val);
1357
1358 /* Enable/disable rx PAUSE. */
1359 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1360
1361 if (bp->flow_ctrl & FLOW_CTRL_RX)
1362 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1363 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1364
1365 /* Enable/disable tx PAUSE. */
1366 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1367 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1368
1369 if (bp->flow_ctrl & FLOW_CTRL_TX)
1370 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1371 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1372
1373 /* Acknowledge the interrupt. */
1374 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1375
1376 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1377 bnx2_init_all_rx_contexts(bp);
1378 }
1379
1380 static void
1381 bnx2_enable_bmsr1(struct bnx2 *bp)
1382 {
1383 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384 (CHIP_NUM(bp) == CHIP_NUM_5709))
1385 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386 MII_BNX2_BLK_ADDR_GP_STATUS);
1387 }
1388
1389 static void
1390 bnx2_disable_bmsr1(struct bnx2 *bp)
1391 {
1392 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1393 (CHIP_NUM(bp) == CHIP_NUM_5709))
1394 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1395 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1396 }
1397
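/* Advertise 2.5G on PHYs that support it.  Returns nonzero if the
 * advertisement was already enabled, 0 if it had to be turned on or the
 * PHY is not 2.5G capable.
 */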
1398 static int
1399 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1400 {
1401 u32 up1;
1402 int ret = 1;
1403
1404 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1405 return 0;
1406
1407 if (bp->autoneg & AUTONEG_SPEED)
1408 bp->advertising |= ADVERTISED_2500baseX_Full;
1409
1410 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1411 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1412
1413 bnx2_read_phy(bp, bp->mii_up1, &up1);
1414 if (!(up1 & BCM5708S_UP1_2G5)) {
1415 up1 |= BCM5708S_UP1_2G5;
1416 bnx2_write_phy(bp, bp->mii_up1, up1);
1417 ret = 0;
1418 }
1419
1420 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1421 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1422 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1423
1424 return ret;
1425 }
1426
1427 static int
1428 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1429 {
1430 u32 up1;
1431 int ret = 0;
1432
1433 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1434 return 0;
1435
1436 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1437 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1438
1439 bnx2_read_phy(bp, bp->mii_up1, &up1);
1440 if (up1 & BCM5708S_UP1_2G5) {
1441 up1 &= ~BCM5708S_UP1_2G5;
1442 bnx2_write_phy(bp, bp->mii_up1, up1);
1443 ret = 1;
1444 }
1445
1446 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1447 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1448 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1449
1450 return ret;
1451 }
1452
1453 static void
1454 bnx2_enable_forced_2g5(struct bnx2 *bp)
1455 {
1456 u32 uninitialized_var(bmcr);
1457 int err;
1458
1459 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1460 return;
1461
1462 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1463 u32 val;
1464
1465 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1466 MII_BNX2_BLK_ADDR_SERDES_DIG);
1467 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1468 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1469 val |= MII_BNX2_SD_MISC1_FORCE |
1470 MII_BNX2_SD_MISC1_FORCE_2_5G;
1471 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1472 }
1473
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1475 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1476 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1477
1478 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1479 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1480 if (!err)
1481 bmcr |= BCM5708S_BMCR_FORCE_2500;
1482 } else {
1483 return;
1484 }
1485
1486 if (err)
1487 return;
1488
1489 if (bp->autoneg & AUTONEG_SPEED) {
1490 bmcr &= ~BMCR_ANENABLE;
1491 if (bp->req_duplex == DUPLEX_FULL)
1492 bmcr |= BMCR_FULLDPLX;
1493 }
1494 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1495 }
1496
1497 static void
1498 bnx2_disable_forced_2g5(struct bnx2 *bp)
1499 {
1500 u32 uninitialized_var(bmcr);
1501 int err;
1502
1503 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1504 return;
1505
1506 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1507 u32 val;
1508
1509 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1510 MII_BNX2_BLK_ADDR_SERDES_DIG);
1511 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1512 val &= ~MII_BNX2_SD_MISC1_FORCE;
1513 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1514 }
1515
1516 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1517 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1518 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1519
1520 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1521 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1522 if (!err)
1523 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1524 } else {
1525 return;
1526 }
1527
1528 if (err)
1529 return;
1530
1531 if (bp->autoneg & AUTONEG_SPEED)
1532 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1533 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1534 }
1535
1536 static void
1537 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1538 {
1539 u32 val;
1540
1541 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1542 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1543 if (start)
1544 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1545 else
1546 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1547 }
1548
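/* Re-read the PHY status and update the driver's link state, duplex and
 * flow control, reporting any change and reprogramming the MAC.
 */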
1549 static int
1550 bnx2_set_link(struct bnx2 *bp)
1551 {
1552 u32 bmsr;
1553 u8 link_up;
1554
1555 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1556 bp->link_up = 1;
1557 return 0;
1558 }
1559
1560 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1561 return 0;
1562
1563 link_up = bp->link_up;
1564
1565 bnx2_enable_bmsr1(bp);
1566 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1567 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1568 bnx2_disable_bmsr1(bp);
1569
1570 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1571 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1572 u32 val, an_dbg;
1573
1574 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1575 bnx2_5706s_force_link_dn(bp, 0);
1576 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1577 }
1578 val = REG_RD(bp, BNX2_EMAC_STATUS);
1579
1580 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1581 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1582 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1583
1584 if ((val & BNX2_EMAC_STATUS_LINK) &&
1585 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1586 bmsr |= BMSR_LSTATUS;
1587 else
1588 bmsr &= ~BMSR_LSTATUS;
1589 }
1590
1591 if (bmsr & BMSR_LSTATUS) {
1592 bp->link_up = 1;
1593
1594 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1595 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1596 bnx2_5706s_linkup(bp);
1597 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1598 bnx2_5708s_linkup(bp);
1599 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1600 bnx2_5709s_linkup(bp);
1601 }
1602 else {
1603 bnx2_copper_linkup(bp);
1604 }
1605 bnx2_resolve_flow_ctrl(bp);
1606 }
1607 else {
1608 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1609 (bp->autoneg & AUTONEG_SPEED))
1610 bnx2_disable_forced_2g5(bp);
1611
1612 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1613 u32 bmcr;
1614
1615 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1616 bmcr |= BMCR_ANENABLE;
1617 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1618
1619 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1620 }
1621 bp->link_up = 0;
1622 }
1623
1624 if (bp->link_up != link_up) {
1625 bnx2_report_link(bp);
1626 }
1627
1628 bnx2_set_mac_link(bp);
1629
1630 return 0;
1631 }
1632
1633 static int
1634 bnx2_reset_phy(struct bnx2 *bp)
1635 {
1636 int i;
1637 u32 reg;
1638
1639 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1640
1641 #define PHY_RESET_MAX_WAIT 100
1642 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1643 udelay(10);
1644
1645 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1646 if (!(reg & BMCR_RESET)) {
1647 udelay(20);
1648 break;
1649 }
1650 }
1651 if (i == PHY_RESET_MAX_WAIT) {
1652 return -EBUSY;
1653 }
1654 return 0;
1655 }
1656
1657 static u32
1658 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1659 {
1660 u32 adv = 0;
1661
1662 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1663 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1664
1665 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1666 adv = ADVERTISE_1000XPAUSE;
1667 }
1668 else {
1669 adv = ADVERTISE_PAUSE_CAP;
1670 }
1671 }
1672 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1673 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1674 adv = ADVERTISE_1000XPSE_ASYM;
1675 }
1676 else {
1677 adv = ADVERTISE_PAUSE_ASYM;
1678 }
1679 }
1680 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1681 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1682 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683 }
1684 else {
1685 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1686 }
1687 }
1688 return adv;
1689 }
1690
1691 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1692
1693 static int
1694 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1695 __releases(&bp->phy_lock)
1696 __acquires(&bp->phy_lock)
1697 {
1698 u32 speed_arg = 0, pause_adv;
1699
1700 pause_adv = bnx2_phy_get_pause_adv(bp);
1701
1702 if (bp->autoneg & AUTONEG_SPEED) {
1703 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1704 if (bp->advertising & ADVERTISED_10baseT_Half)
1705 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1706 if (bp->advertising & ADVERTISED_10baseT_Full)
1707 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1708 if (bp->advertising & ADVERTISED_100baseT_Half)
1709 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1710 if (bp->advertising & ADVERTISED_100baseT_Full)
1711 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1712 if (bp->advertising & ADVERTISED_1000baseT_Full)
1713 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1714 if (bp->advertising & ADVERTISED_2500baseX_Full)
1715 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1716 } else {
1717 if (bp->req_line_speed == SPEED_2500)
1718 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1719 else if (bp->req_line_speed == SPEED_1000)
1720 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1721 else if (bp->req_line_speed == SPEED_100) {
1722 if (bp->req_duplex == DUPLEX_FULL)
1723 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1724 else
1725 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1726 } else if (bp->req_line_speed == SPEED_10) {
1727 if (bp->req_duplex == DUPLEX_FULL)
1728 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1729 else
1730 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1731 }
1732 }
1733
1734 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1735 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1736 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1737 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1738
1739 if (port == PORT_TP)
1740 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1741 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1742
1743 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1744
1745 spin_unlock_bh(&bp->phy_lock);
1746 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1747 spin_lock_bh(&bp->phy_lock);
1748
1749 return 0;
1750 }
1751
1752 static int
1753 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1754 __releases(&bp->phy_lock)
1755 __acquires(&bp->phy_lock)
1756 {
1757 u32 adv, bmcr;
1758 u32 new_adv = 0;
1759
1760 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1761 return (bnx2_setup_remote_phy(bp, port));
1762
1763 if (!(bp->autoneg & AUTONEG_SPEED)) {
1764 u32 new_bmcr;
1765 int force_link_down = 0;
1766
1767 if (bp->req_line_speed == SPEED_2500) {
1768 if (!bnx2_test_and_enable_2g5(bp))
1769 force_link_down = 1;
1770 } else if (bp->req_line_speed == SPEED_1000) {
1771 if (bnx2_test_and_disable_2g5(bp))
1772 force_link_down = 1;
1773 }
1774 bnx2_read_phy(bp, bp->mii_adv, &adv);
1775 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1776
1777 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1778 new_bmcr = bmcr & ~BMCR_ANENABLE;
1779 new_bmcr |= BMCR_SPEED1000;
1780
1781 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1782 if (bp->req_line_speed == SPEED_2500)
1783 bnx2_enable_forced_2g5(bp);
1784 else if (bp->req_line_speed == SPEED_1000) {
1785 bnx2_disable_forced_2g5(bp);
1786 new_bmcr &= ~0x2000;
1787 }
1788
1789 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1790 if (bp->req_line_speed == SPEED_2500)
1791 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1792 else
1793 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1794 }
1795
1796 if (bp->req_duplex == DUPLEX_FULL) {
1797 adv |= ADVERTISE_1000XFULL;
1798 new_bmcr |= BMCR_FULLDPLX;
1799 }
1800 else {
1801 adv |= ADVERTISE_1000XHALF;
1802 new_bmcr &= ~BMCR_FULLDPLX;
1803 }
1804 if ((new_bmcr != bmcr) || (force_link_down)) {
1805 /* Force a link down visible on the other side */
1806 if (bp->link_up) {
1807 bnx2_write_phy(bp, bp->mii_adv, adv &
1808 ~(ADVERTISE_1000XFULL |
1809 ADVERTISE_1000XHALF));
1810 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1811 BMCR_ANRESTART | BMCR_ANENABLE);
1812
1813 bp->link_up = 0;
1814 netif_carrier_off(bp->dev);
1815 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1816 bnx2_report_link(bp);
1817 }
1818 bnx2_write_phy(bp, bp->mii_adv, adv);
1819 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1820 } else {
1821 bnx2_resolve_flow_ctrl(bp);
1822 bnx2_set_mac_link(bp);
1823 }
1824 return 0;
1825 }
1826
1827 bnx2_test_and_enable_2g5(bp);
1828
1829 if (bp->advertising & ADVERTISED_1000baseT_Full)
1830 new_adv |= ADVERTISE_1000XFULL;
1831
1832 new_adv |= bnx2_phy_get_pause_adv(bp);
1833
1834 bnx2_read_phy(bp, bp->mii_adv, &adv);
1835 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1836
1837 bp->serdes_an_pending = 0;
1838 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1839 /* Force a link down visible on the other side */
1840 if (bp->link_up) {
1841 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1842 spin_unlock_bh(&bp->phy_lock);
1843 msleep(20);
1844 spin_lock_bh(&bp->phy_lock);
1845 }
1846
1847 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1848 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1849 BMCR_ANENABLE);
1850 /* Speed up link-up time when the link partner
1851 		 * does not autonegotiate, which is very common
1852 		 * in blade servers. Some blade servers use
1853 		 * IPMI for keyboard input and it's important
1854 * to minimize link disruptions. Autoneg. involves
1855 * exchanging base pages plus 3 next pages and
1856 * normally completes in about 120 msec.
1857 */
1858 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1859 bp->serdes_an_pending = 1;
1860 mod_timer(&bp->timer, jiffies + bp->current_interval);
1861 } else {
1862 bnx2_resolve_flow_ctrl(bp);
1863 bnx2_set_mac_link(bp);
1864 }
1865
1866 return 0;
1867 }
1868
1869 #define ETHTOOL_ALL_FIBRE_SPEED \
1870 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1871 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1872 (ADVERTISED_1000baseT_Full)
1873
1874 #define ETHTOOL_ALL_COPPER_SPEED \
1875 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1876 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1877 ADVERTISED_1000baseT_Full)
1878
1879 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1880 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1881
1882 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1883
1884 static void
1885 bnx2_set_default_remote_link(struct bnx2 *bp)
1886 {
1887 u32 link;
1888
1889 if (bp->phy_port == PORT_TP)
1890 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1891 else
1892 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1893
1894 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1895 bp->req_line_speed = 0;
1896 bp->autoneg |= AUTONEG_SPEED;
1897 bp->advertising = ADVERTISED_Autoneg;
1898 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1899 bp->advertising |= ADVERTISED_10baseT_Half;
1900 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1901 bp->advertising |= ADVERTISED_10baseT_Full;
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1903 bp->advertising |= ADVERTISED_100baseT_Half;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1905 bp->advertising |= ADVERTISED_100baseT_Full;
1906 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1907 bp->advertising |= ADVERTISED_1000baseT_Full;
1908 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1909 bp->advertising |= ADVERTISED_2500baseX_Full;
1910 } else {
1911 bp->autoneg = 0;
1912 bp->advertising = 0;
1913 bp->req_duplex = DUPLEX_FULL;
1914 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1915 bp->req_line_speed = SPEED_10;
1916 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1917 bp->req_duplex = DUPLEX_HALF;
1918 }
1919 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1920 bp->req_line_speed = SPEED_100;
1921 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1922 bp->req_duplex = DUPLEX_HALF;
1923 }
1924 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1925 bp->req_line_speed = SPEED_1000;
1926 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1927 bp->req_line_speed = SPEED_2500;
1928 }
1929 }
1930
1931 static void
1932 bnx2_set_default_link(struct bnx2 *bp)
1933 {
1934 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1935 bnx2_set_default_remote_link(bp);
1936 return;
1937 }
1938
1939 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1940 bp->req_line_speed = 0;
1941 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1942 u32 reg;
1943
1944 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1945
1946 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1947 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1948 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1949 bp->autoneg = 0;
1950 bp->req_line_speed = bp->line_speed = SPEED_1000;
1951 bp->req_duplex = DUPLEX_FULL;
1952 }
1953 } else
1954 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1955 }
1956
1957 static void
1958 bnx2_send_heart_beat(struct bnx2 *bp)
1959 {
1960 u32 msg;
1961 u32 addr;
1962
1963 spin_lock(&bp->indirect_lock);
1964 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1965 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1966 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1967 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1968 spin_unlock(&bp->indirect_lock);
1969 }
1970
1971 static void
1972 bnx2_remote_phy_event(struct bnx2 *bp)
1973 {
1974 u32 msg;
1975 u8 link_up = bp->link_up;
1976 u8 old_port;
1977
1978 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1979
1980 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1981 bnx2_send_heart_beat(bp);
1982
1983 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1984
1985 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1986 bp->link_up = 0;
1987 else {
1988 u32 speed;
1989
1990 bp->link_up = 1;
1991 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1992 bp->duplex = DUPLEX_FULL;
1993 switch (speed) {
1994 case BNX2_LINK_STATUS_10HALF:
1995 bp->duplex = DUPLEX_HALF;
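			/* fall through */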
1996 case BNX2_LINK_STATUS_10FULL:
1997 bp->line_speed = SPEED_10;
1998 break;
1999 case BNX2_LINK_STATUS_100HALF:
2000 bp->duplex = DUPLEX_HALF;
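			/* fall through */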
2001 case BNX2_LINK_STATUS_100BASE_T4:
2002 case BNX2_LINK_STATUS_100FULL:
2003 bp->line_speed = SPEED_100;
2004 break;
2005 case BNX2_LINK_STATUS_1000HALF:
2006 bp->duplex = DUPLEX_HALF;
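			/* fall through */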
2007 case BNX2_LINK_STATUS_1000FULL:
2008 bp->line_speed = SPEED_1000;
2009 break;
2010 case BNX2_LINK_STATUS_2500HALF:
2011 bp->duplex = DUPLEX_HALF;
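			/* fall through */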
2012 case BNX2_LINK_STATUS_2500FULL:
2013 bp->line_speed = SPEED_2500;
2014 break;
2015 default:
2016 bp->line_speed = 0;
2017 break;
2018 }
2019
2020 bp->flow_ctrl = 0;
2021 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2022 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2023 if (bp->duplex == DUPLEX_FULL)
2024 bp->flow_ctrl = bp->req_flow_ctrl;
2025 } else {
2026 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2027 bp->flow_ctrl |= FLOW_CTRL_TX;
2028 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2029 bp->flow_ctrl |= FLOW_CTRL_RX;
2030 }
2031
2032 old_port = bp->phy_port;
2033 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2034 bp->phy_port = PORT_FIBRE;
2035 else
2036 bp->phy_port = PORT_TP;
2037
2038 if (old_port != bp->phy_port)
2039 bnx2_set_default_link(bp);
2040
2041 }
2042 if (bp->link_up != link_up)
2043 bnx2_report_link(bp);
2044
2045 bnx2_set_mac_link(bp);
2046 }
2047
2048 static int
2049 bnx2_set_remote_link(struct bnx2 *bp)
2050 {
2051 u32 evt_code;
2052
2053 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2054 switch (evt_code) {
2055 case BNX2_FW_EVT_CODE_LINK_EVENT:
2056 bnx2_remote_phy_event(bp);
2057 break;
2058 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2059 default:
2060 bnx2_send_heart_beat(bp);
2061 break;
2062 }
2063 return 0;
2064 }
2065
2066 static int
2067 bnx2_setup_copper_phy(struct bnx2 *bp)
2068 __releases(&bp->phy_lock)
2069 __acquires(&bp->phy_lock)
2070 {
2071 u32 bmcr;
2072 u32 new_bmcr;
2073
2074 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2075
2076 if (bp->autoneg & AUTONEG_SPEED) {
2077 u32 adv_reg, adv1000_reg;
2078 u32 new_adv_reg = 0;
2079 u32 new_adv1000_reg = 0;
2080
2081 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2082 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2083 ADVERTISE_PAUSE_ASYM);
2084
2085 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2086 adv1000_reg &= PHY_ALL_1000_SPEED;
2087
2088 if (bp->advertising & ADVERTISED_10baseT_Half)
2089 new_adv_reg |= ADVERTISE_10HALF;
2090 if (bp->advertising & ADVERTISED_10baseT_Full)
2091 new_adv_reg |= ADVERTISE_10FULL;
2092 if (bp->advertising & ADVERTISED_100baseT_Half)
2093 new_adv_reg |= ADVERTISE_100HALF;
2094 if (bp->advertising & ADVERTISED_100baseT_Full)
2095 new_adv_reg |= ADVERTISE_100FULL;
2096 if (bp->advertising & ADVERTISED_1000baseT_Full)
2097 new_adv1000_reg |= ADVERTISE_1000FULL;
2098
2099 new_adv_reg |= ADVERTISE_CSMA;
2100
2101 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2102
2103 if ((adv1000_reg != new_adv1000_reg) ||
2104 (adv_reg != new_adv_reg) ||
2105 ((bmcr & BMCR_ANENABLE) == 0)) {
2106
2107 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2108 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2109 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2110 BMCR_ANENABLE);
2111 }
2112 else if (bp->link_up) {
2113 /* Flow ctrl may have changed from auto to forced */
2114 /* or vice-versa. */
2115
2116 bnx2_resolve_flow_ctrl(bp);
2117 bnx2_set_mac_link(bp);
2118 }
2119 return 0;
2120 }
2121
2122 new_bmcr = 0;
2123 if (bp->req_line_speed == SPEED_100) {
2124 new_bmcr |= BMCR_SPEED100;
2125 }
2126 if (bp->req_duplex == DUPLEX_FULL) {
2127 new_bmcr |= BMCR_FULLDPLX;
2128 }
2129 if (new_bmcr != bmcr) {
2130 u32 bmsr;
2131
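/* The BMSR link status bit is latched low; read it twice to get the
 * current link state.
 */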
2132 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2134
2135 if (bmsr & BMSR_LSTATUS) {
2136 /* Force link down */
2137 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2138 spin_unlock_bh(&bp->phy_lock);
2139 msleep(50);
2140 spin_lock_bh(&bp->phy_lock);
2141
2142 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2144 }
2145
2146 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2147
2148 /* Normally, the new speed is set up after the link has
2149 * gone down and come back up. In some cases, the link will not go
2150 * down, so we need to set up the new speed here.
2151 */
2152 if (bmsr & BMSR_LSTATUS) {
2153 bp->line_speed = bp->req_line_speed;
2154 bp->duplex = bp->req_duplex;
2155 bnx2_resolve_flow_ctrl(bp);
2156 bnx2_set_mac_link(bp);
2157 }
2158 } else {
2159 bnx2_resolve_flow_ctrl(bp);
2160 bnx2_set_mac_link(bp);
2161 }
2162 return 0;
2163 }
2164
2165 static int
2166 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2167 __releases(&bp->phy_lock)
2168 __acquires(&bp->phy_lock)
2169 {
2170 if (bp->loopback == MAC_LOOPBACK)
2171 return 0;
2172
2173 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2174 return (bnx2_setup_serdes_phy(bp, port));
2175 }
2176 else {
2177 return (bnx2_setup_copper_phy(bp));
2178 }
2179 }
2180
2181 static int
2182 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2183 {
2184 u32 val;
2185
2186 bp->mii_bmcr = MII_BMCR + 0x10;
2187 bp->mii_bmsr = MII_BMSR + 0x10;
2188 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2189 bp->mii_adv = MII_ADVERTISE + 0x10;
2190 bp->mii_lpa = MII_LPA + 0x10;
2191 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2192
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2194 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2195
2196 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2197 if (reset_phy)
2198 bnx2_reset_phy(bp);
2199
2200 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2201
2202 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2203 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2204 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2205 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2206
2207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2208 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2209 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2210 val |= BCM5708S_UP1_2G5;
2211 else
2212 val &= ~BCM5708S_UP1_2G5;
2213 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2214
2215 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2216 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2217 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2218 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2219
2220 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2221
2222 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2223 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2224 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2225
2226 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2227
2228 return 0;
2229 }
2230
2231 static int
2232 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2233 {
2234 u32 val;
2235
2236 if (reset_phy)
2237 bnx2_reset_phy(bp);
2238
2239 bp->mii_up1 = BCM5708S_UP1;
2240
2241 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2242 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2243 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2244
2245 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2246 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2247 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2248
2249 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2250 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2251 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2252
2253 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2254 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2255 val |= BCM5708S_UP1_2G5;
2256 bnx2_write_phy(bp, BCM5708S_UP1, val);
2257 }
2258
2259 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2260 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2261 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2262 /* increase tx signal amplitude */
2263 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2264 BCM5708S_BLK_ADDR_TX_MISC);
2265 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2266 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2267 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2268 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2269 }
2270
2271 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2272 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2273
2274 if (val) {
2275 u32 is_backplane;
2276
2277 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2278 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2279 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2280 BCM5708S_BLK_ADDR_TX_MISC);
2281 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2282 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2283 BCM5708S_BLK_ADDR_DIG);
2284 }
2285 }
2286 return 0;
2287 }
2288
2289 static int
2290 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2291 {
2292 if (reset_phy)
2293 bnx2_reset_phy(bp);
2294
2295 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2296
2297 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2298 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2299
2300 if (bp->dev->mtu > 1500) {
2301 u32 val;
2302
2303 /* Set extended packet length bit */
2304 bnx2_write_phy(bp, 0x18, 0x7);
2305 bnx2_read_phy(bp, 0x18, &val);
2306 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2307
2308 bnx2_write_phy(bp, 0x1c, 0x6c00);
2309 bnx2_read_phy(bp, 0x1c, &val);
2310 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2311 }
2312 else {
2313 u32 val;
2314
2315 bnx2_write_phy(bp, 0x18, 0x7);
2316 bnx2_read_phy(bp, 0x18, &val);
2317 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2318
2319 bnx2_write_phy(bp, 0x1c, 0x6c00);
2320 bnx2_read_phy(bp, 0x1c, &val);
2321 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2322 }
2323
2324 return 0;
2325 }
2326
2327 static int
2328 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2329 {
2330 u32 val;
2331
2332 if (reset_phy)
2333 bnx2_reset_phy(bp);
2334
2335 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2336 bnx2_write_phy(bp, 0x18, 0x0c00);
2337 bnx2_write_phy(bp, 0x17, 0x000a);
2338 bnx2_write_phy(bp, 0x15, 0x310b);
2339 bnx2_write_phy(bp, 0x17, 0x201f);
2340 bnx2_write_phy(bp, 0x15, 0x9506);
2341 bnx2_write_phy(bp, 0x17, 0x401f);
2342 bnx2_write_phy(bp, 0x15, 0x14e2);
2343 bnx2_write_phy(bp, 0x18, 0x0400);
2344 }
2345
2346 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2347 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2348 MII_BNX2_DSP_EXPAND_REG | 0x8);
2349 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2350 val &= ~(1 << 8);
2351 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2352 }
2353
2354 if (bp->dev->mtu > 1500) {
2355 /* Set extended packet length bit */
2356 bnx2_write_phy(bp, 0x18, 0x7);
2357 bnx2_read_phy(bp, 0x18, &val);
2358 bnx2_write_phy(bp, 0x18, val | 0x4000);
2359
2360 bnx2_read_phy(bp, 0x10, &val);
2361 bnx2_write_phy(bp, 0x10, val | 0x1);
2362 }
2363 else {
2364 bnx2_write_phy(bp, 0x18, 0x7);
2365 bnx2_read_phy(bp, 0x18, &val);
2366 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2367
2368 bnx2_read_phy(bp, 0x10, &val);
2369 bnx2_write_phy(bp, 0x10, val & ~0x1);
2370 }
2371
2372 /* ethernet@wirespeed */
2373 bnx2_write_phy(bp, 0x18, 0x7007);
2374 bnx2_read_phy(bp, 0x18, &val);
2375 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2376 return 0;
2377 }
2378
2379
2380 static int
2381 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2382 __releases(&bp->phy_lock)
2383 __acquires(&bp->phy_lock)
2384 {
2385 u32 val;
2386 int rc = 0;
2387
2388 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2389 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2390
2391 bp->mii_bmcr = MII_BMCR;
2392 bp->mii_bmsr = MII_BMSR;
2393 bp->mii_bmsr1 = MII_BMSR;
2394 bp->mii_adv = MII_ADVERTISE;
2395 bp->mii_lpa = MII_LPA;
2396
2397 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2398
2399 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2400 goto setup_phy;
2401
2402 bnx2_read_phy(bp, MII_PHYSID1, &val);
2403 bp->phy_id = val << 16;
2404 bnx2_read_phy(bp, MII_PHYSID2, &val);
2405 bp->phy_id |= val & 0xffff;
2406
2407 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2408 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2409 rc = bnx2_init_5706s_phy(bp, reset_phy);
2410 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2411 rc = bnx2_init_5708s_phy(bp, reset_phy);
2412 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2413 rc = bnx2_init_5709s_phy(bp, reset_phy);
2414 }
2415 else {
2416 rc = bnx2_init_copper_phy(bp, reset_phy);
2417 }
2418
2419 setup_phy:
2420 if (!rc)
2421 rc = bnx2_setup_phy(bp, bp->phy_port);
2422
2423 return rc;
2424 }
2425
2426 static int
2427 bnx2_set_mac_loopback(struct bnx2 *bp)
2428 {
2429 u32 mac_mode;
2430
2431 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2432 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2433 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2434 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2435 bp->link_up = 1;
2436 return 0;
2437 }
2438
2439 static int bnx2_test_link(struct bnx2 *);
2440
2441 static int
2442 bnx2_set_phy_loopback(struct bnx2 *bp)
2443 {
2444 u32 mac_mode;
2445 int rc, i;
2446
2447 spin_lock_bh(&bp->phy_lock);
2448 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2449 BMCR_SPEED1000);
2450 spin_unlock_bh(&bp->phy_lock);
2451 if (rc)
2452 return rc;
2453
2454 for (i = 0; i < 10; i++) {
2455 if (bnx2_test_link(bp) == 0)
2456 break;
2457 msleep(100);
2458 }
2459
2460 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2461 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2462 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2463 BNX2_EMAC_MODE_25G_MODE);
2464
2465 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2466 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2467 bp->link_up = 1;
2468 return 0;
2469 }
2470
2471 static int
2472 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2473 {
2474 int i;
2475 u32 val;
2476
2477 bp->fw_wr_seq++;
2478 msg_data |= bp->fw_wr_seq;
2479
2480 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2481
2482 if (!ack)
2483 return 0;
2484
2485 /* wait for an acknowledgement. */
2486 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2487 msleep(10);
2488
2489 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2490
2491 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2492 break;
2493 }
2494 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2495 return 0;
2496
2497 /* If we timed out, inform the firmware that this is the case. */
2498 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2499 if (!silent)
2500 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2501
2502 msg_data &= ~BNX2_DRV_MSG_CODE;
2503 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2504
2505 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2506
2507 return -EBUSY;
2508 }
2509
2510 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2511 return -EIO;
2512
2513 return 0;
2514 }
2515
2516 static int
2517 bnx2_init_5709_context(struct bnx2 *bp)
2518 {
2519 int i, ret = 0;
2520 u32 val;
2521
2522 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2523 val |= (BCM_PAGE_BITS - 8) << 16;
2524 REG_WR(bp, BNX2_CTX_COMMAND, val);
2525 for (i = 0; i < 10; i++) {
2526 val = REG_RD(bp, BNX2_CTX_COMMAND);
2527 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2528 break;
2529 udelay(2);
2530 }
2531 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2532 return -EBUSY;
2533
2534 for (i = 0; i < bp->ctx_pages; i++) {
2535 int j;
2536
2537 if (bp->ctx_blk[i])
2538 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2539 else
2540 return -ENOMEM;
2541
2542 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2543 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2544 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2545 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2546 (u64) bp->ctx_blk_mapping[i] >> 32);
2547 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2548 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2549 for (j = 0; j < 10; j++) {
2550
2551 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2552 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2553 break;
2554 udelay(5);
2555 }
2556 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2557 ret = -EBUSY;
2558 break;
2559 }
2560 }
2561 return ret;
2562 }
2563
2564 static void
2565 bnx2_init_context(struct bnx2 *bp)
2566 {
2567 u32 vcid;
2568
2569 vcid = 96;
2570 while (vcid) {
2571 u32 vcid_addr, pcid_addr, offset;
2572 int i;
2573
2574 vcid--;
2575
2576 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2577 u32 new_vcid;
2578
2579 vcid_addr = GET_PCID_ADDR(vcid);
2580 if (vcid & 0x8) {
2581 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2582 }
2583 else {
2584 new_vcid = vcid;
2585 }
2586 pcid_addr = GET_PCID_ADDR(new_vcid);
2587 }
2588 else {
2589 vcid_addr = GET_CID_ADDR(vcid);
2590 pcid_addr = vcid_addr;
2591 }
2592
2593 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2594 vcid_addr += (i << PHY_CTX_SHIFT);
2595 pcid_addr += (i << PHY_CTX_SHIFT);
2596
2597 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2598 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2599
2600 /* Zero out the context. */
2601 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2602 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2603 }
2604 }
2605 }
2606
2607 static int
2608 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2609 {
2610 u16 *good_mbuf;
2611 u32 good_mbuf_cnt;
2612 u32 val;
2613
2614 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2615 if (good_mbuf == NULL) {
2616 pr_err("Failed to allocate memory in %s\n", __func__);
2617 return -ENOMEM;
2618 }
2619
2620 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2621 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2622
2623 good_mbuf_cnt = 0;
2624
2625 /* Allocate a bunch of mbufs and save the good ones in an array. */
2626 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2627 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2628 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2629 BNX2_RBUF_COMMAND_ALLOC_REQ);
2630
2631 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2632
2633 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2634
2635 /* The addresses with Bit 9 set are bad memory blocks. */
2636 if (!(val & (1 << 9))) {
2637 good_mbuf[good_mbuf_cnt] = (u16) val;
2638 good_mbuf_cnt++;
2639 }
2640
2641 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2642 }
2643
2644 /* Free the good ones back to the mbuf pool thus discarding
2645 * all the bad ones. */
2646 while (good_mbuf_cnt) {
2647 good_mbuf_cnt--;
2648
2649 val = good_mbuf[good_mbuf_cnt];
2650 val = (val << 9) | val | 1;
2651
2652 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2653 }
2654 kfree(good_mbuf);
2655 return 0;
2656 }
2657
2658 static void
2659 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2660 {
2661 u32 val;
2662
2663 val = (mac_addr[0] << 8) | mac_addr[1];
2664
2665 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2666
2667 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2668 (mac_addr[4] << 8) | mac_addr[5];
2669
2670 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2671 }
2672
2673 static inline int
2674 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2675 {
2676 dma_addr_t mapping;
2677 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2678 struct rx_bd *rxbd =
2679 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2680 struct page *page = alloc_page(gfp);
2681
2682 if (!page)
2683 return -ENOMEM;
2684 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2685 PCI_DMA_FROMDEVICE);
2686 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2687 __free_page(page);
2688 return -EIO;
2689 }
2690
2691 rx_pg->page = page;
2692 dma_unmap_addr_set(rx_pg, mapping, mapping);
2693 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2694 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2695 return 0;
2696 }
2697
2698 static void
2699 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2700 {
2701 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2702 struct page *page = rx_pg->page;
2703
2704 if (!page)
2705 return;
2706
2707 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2708 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2709
2710 __free_page(page);
2711 rx_pg->page = NULL;
2712 }
2713
2714 static inline int
2715 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2716 {
2717 struct sk_buff *skb;
2718 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2719 dma_addr_t mapping;
2720 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2721 unsigned long align;
2722
2723 skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2724 if (skb == NULL) {
2725 return -ENOMEM;
2726 }
2727
2728 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2729 skb_reserve(skb, BNX2_RX_ALIGN - align);
2730
2731 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2732 PCI_DMA_FROMDEVICE);
2733 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2734 dev_kfree_skb(skb);
2735 return -EIO;
2736 }
2737
2738 rx_buf->skb = skb;
2739 rx_buf->desc = (struct l2_fhdr *) skb->data;
2740 dma_unmap_addr_set(rx_buf, mapping, mapping);
2741
2742 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2743 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2744
2745 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2746
2747 return 0;
2748 }
2749
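/* Return 1 if the attention bit for @event differs from its ack bit,
 * acknowledging the change via the status bit set/clear command
 * registers; return 0 otherwise.
 */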
2750 static int
2751 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2752 {
2753 struct status_block *sblk = bnapi->status_blk.msi;
2754 u32 new_link_state, old_link_state;
2755 int is_set = 1;
2756
2757 new_link_state = sblk->status_attn_bits & event;
2758 old_link_state = sblk->status_attn_bits_ack & event;
2759 if (new_link_state != old_link_state) {
2760 if (new_link_state)
2761 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2762 else
2763 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2764 } else
2765 is_set = 0;
2766
2767 return is_set;
2768 }
2769
2770 static void
2771 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2772 {
2773 spin_lock(&bp->phy_lock);
2774
2775 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2776 bnx2_set_link(bp);
2777 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2778 bnx2_set_remote_link(bp);
2779
2780 spin_unlock(&bp->phy_lock);
2781
2782 }
2783
2784 static inline u16
2785 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2786 {
2787 u16 cons;
2788
2789 /* Tell compiler that status block fields can change. */
2790 barrier();
2791 cons = *bnapi->hw_tx_cons_ptr;
2792 barrier();
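/* Skip the last BD slot of each ring page, which holds the next-page
 * pointer rather than a packet BD.
 */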
2793 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2794 cons++;
2795 return cons;
2796 }
2797
2798 static int
2799 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2800 {
2801 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2802 u16 hw_cons, sw_cons, sw_ring_cons;
2803 int tx_pkt = 0, index;
2804 struct netdev_queue *txq;
2805
2806 index = (bnapi - bp->bnx2_napi);
2807 txq = netdev_get_tx_queue(bp->dev, index);
2808
2809 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2810 sw_cons = txr->tx_cons;
2811
2812 while (sw_cons != hw_cons) {
2813 struct sw_tx_bd *tx_buf;
2814 struct sk_buff *skb;
2815 int i, last;
2816
2817 sw_ring_cons = TX_RING_IDX(sw_cons);
2818
2819 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2820 skb = tx_buf->skb;
2821
2822 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2823 prefetch(&skb->end);
2824
2825 /* partial BD completions possible with TSO packets */
2826 if (tx_buf->is_gso) {
2827 u16 last_idx, last_ring_idx;
2828
2829 last_idx = sw_cons + tx_buf->nr_frags + 1;
2830 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2831 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2832 last_idx++;
2833 }
2834 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2835 break;
2836 }
2837 }
2838
2839 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2840 skb_headlen(skb), PCI_DMA_TODEVICE);
2841
2842 tx_buf->skb = NULL;
2843 last = tx_buf->nr_frags;
2844
2845 for (i = 0; i < last; i++) {
2846 sw_cons = NEXT_TX_BD(sw_cons);
2847
2848 dma_unmap_page(&bp->pdev->dev,
2849 dma_unmap_addr(
2850 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2851 mapping),
2852 skb_shinfo(skb)->frags[i].size,
2853 PCI_DMA_TODEVICE);
2854 }
2855
2856 sw_cons = NEXT_TX_BD(sw_cons);
2857
2858 dev_kfree_skb(skb);
2859 tx_pkt++;
2860 if (tx_pkt == budget)
2861 break;
2862
2863 if (hw_cons == sw_cons)
2864 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2865 }
2866
2867 txr->hw_tx_cons = hw_cons;
2868 txr->tx_cons = sw_cons;
2869
2870 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2871 * before checking for netif_tx_queue_stopped(). Without the
2872 * memory barrier, there is a small possibility that bnx2_start_xmit()
2873 * will miss it and cause the queue to be stopped forever.
2874 */
2875 smp_mb();
2876
2877 if (unlikely(netif_tx_queue_stopped(txq)) &&
2878 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2879 __netif_tx_lock(txq, smp_processor_id());
2880 if ((netif_tx_queue_stopped(txq)) &&
2881 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2882 netif_tx_wake_queue(txq);
2883 __netif_tx_unlock(txq);
2884 }
2885
2886 return tx_pkt;
2887 }
2888
2889 static void
2890 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2891 struct sk_buff *skb, int count)
2892 {
2893 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2894 struct rx_bd *cons_bd, *prod_bd;
2895 int i;
2896 u16 hw_prod, prod;
2897 u16 cons = rxr->rx_pg_cons;
2898
2899 cons_rx_pg = &rxr->rx_pg_ring[cons];
2900
2901 /* The caller was unable to allocate a new page to replace the
2902 * last one in the frags array, so we need to recycle that page
2903 * and then free the skb.
2904 */
2905 if (skb) {
2906 struct page *page;
2907 struct skb_shared_info *shinfo;
2908
2909 shinfo = skb_shinfo(skb);
2910 shinfo->nr_frags--;
2911 page = shinfo->frags[shinfo->nr_frags].page;
2912 shinfo->frags[shinfo->nr_frags].page = NULL;
2913
2914 cons_rx_pg->page = page;
2915 dev_kfree_skb(skb);
2916 }
2917
2918 hw_prod = rxr->rx_pg_prod;
2919
2920 for (i = 0; i < count; i++) {
2921 prod = RX_PG_RING_IDX(hw_prod);
2922
2923 prod_rx_pg = &rxr->rx_pg_ring[prod];
2924 cons_rx_pg = &rxr->rx_pg_ring[cons];
2925 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2926 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2927
2928 if (prod != cons) {
2929 prod_rx_pg->page = cons_rx_pg->page;
2930 cons_rx_pg->page = NULL;
2931 dma_unmap_addr_set(prod_rx_pg, mapping,
2932 dma_unmap_addr(cons_rx_pg, mapping));
2933
2934 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2935 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2936
2937 }
2938 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2939 hw_prod = NEXT_RX_BD(hw_prod);
2940 }
2941 rxr->rx_pg_prod = hw_prod;
2942 rxr->rx_pg_cons = cons;
2943 }
2944
2945 static inline void
2946 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2947 struct sk_buff *skb, u16 cons, u16 prod)
2948 {
2949 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2950 struct rx_bd *cons_bd, *prod_bd;
2951
2952 cons_rx_buf = &rxr->rx_buf_ring[cons];
2953 prod_rx_buf = &rxr->rx_buf_ring[prod];
2954
2955 dma_sync_single_for_device(&bp->pdev->dev,
2956 dma_unmap_addr(cons_rx_buf, mapping),
2957 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2958
2959 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2960
2961 prod_rx_buf->skb = skb;
2962 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2963
2964 if (cons == prod)
2965 return;
2966
2967 dma_unmap_addr_set(prod_rx_buf, mapping,
2968 dma_unmap_addr(cons_rx_buf, mapping));
2969
2970 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2971 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2972 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2973 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2974 }
2975
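/* ring_idx packs the ring consumer index in the upper 16 bits and the
 * producer index in the lower 16 bits.
 */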
2976 static int
2977 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2978 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2979 u32 ring_idx)
2980 {
2981 int err;
2982 u16 prod = ring_idx & 0xffff;
2983
2984 err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
2985 if (unlikely(err)) {
2986 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2987 if (hdr_len) {
2988 unsigned int raw_len = len + 4;
2989 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2990
2991 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2992 }
2993 return err;
2994 }
2995
2996 skb_reserve(skb, BNX2_RX_OFFSET);
2997 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
2998 PCI_DMA_FROMDEVICE);
2999
3000 if (hdr_len == 0) {
3001 skb_put(skb, len);
3002 return 0;
3003 } else {
3004 unsigned int i, frag_len, frag_size, pages;
3005 struct sw_pg *rx_pg;
3006 u16 pg_cons = rxr->rx_pg_cons;
3007 u16 pg_prod = rxr->rx_pg_prod;
3008
3009 frag_size = len + 4 - hdr_len;
3010 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3011 skb_put(skb, hdr_len);
3012
3013 for (i = 0; i < pages; i++) {
3014 dma_addr_t mapping_old;
3015
3016 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3017 if (unlikely(frag_len <= 4)) {
3018 unsigned int tail = 4 - frag_len;
3019
3020 rxr->rx_pg_cons = pg_cons;
3021 rxr->rx_pg_prod = pg_prod;
3022 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3023 pages - i);
3024 skb->len -= tail;
3025 if (i == 0) {
3026 skb->tail -= tail;
3027 } else {
3028 skb_frag_t *frag =
3029 &skb_shinfo(skb)->frags[i - 1];
3030 frag->size -= tail;
3031 skb->data_len -= tail;
3032 skb->truesize -= tail;
3033 }
3034 return 0;
3035 }
3036 rx_pg = &rxr->rx_pg_ring[pg_cons];
3037
3038 /* Don't unmap yet. If we're unable to allocate a new
3039 * page, we need to recycle the page and the DMA addr.
3040 */
3041 mapping_old = dma_unmap_addr(rx_pg, mapping);
3042 if (i == pages - 1)
3043 frag_len -= 4;
3044
3045 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3046 rx_pg->page = NULL;
3047
3048 err = bnx2_alloc_rx_page(bp, rxr,
3049 RX_PG_RING_IDX(pg_prod),
3050 GFP_ATOMIC);
3051 if (unlikely(err)) {
3052 rxr->rx_pg_cons = pg_cons;
3053 rxr->rx_pg_prod = pg_prod;
3054 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3055 pages - i);
3056 return err;
3057 }
3058
3059 dma_unmap_page(&bp->pdev->dev, mapping_old,
3060 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3061
3062 frag_size -= frag_len;
3063 skb->data_len += frag_len;
3064 skb->truesize += frag_len;
3065 skb->len += frag_len;
3066
3067 pg_prod = NEXT_RX_BD(pg_prod);
3068 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3069 }
3070 rxr->rx_pg_prod = pg_prod;
3071 rxr->rx_pg_cons = pg_cons;
3072 }
3073 return 0;
3074 }
3075
3076 static inline u16
3077 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3078 {
3079 u16 cons;
3080
3081 /* Tell compiler that status block fields can change. */
3082 barrier();
3083 cons = *bnapi->hw_rx_cons_ptr;
3084 barrier();
3085 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3086 cons++;
3087 return cons;
3088 }
3089
3090 static int
3091 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3092 {
3093 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3094 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3095 struct l2_fhdr *rx_hdr;
3096 int rx_pkt = 0, pg_ring_used = 0;
3097
3098 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3099 sw_cons = rxr->rx_cons;
3100 sw_prod = rxr->rx_prod;
3101
3102 /* Memory barrier necessary as speculative reads of the rx
3103 * buffer can be ahead of the index in the status block
3104 */
3105 rmb();
3106 while (sw_cons != hw_cons) {
3107 unsigned int len, hdr_len;
3108 u32 status;
3109 struct sw_bd *rx_buf, *next_rx_buf;
3110 struct sk_buff *skb;
3111 dma_addr_t dma_addr;
3112 u16 vtag = 0;
3113 int hw_vlan __maybe_unused = 0;
3114
3115 sw_ring_cons = RX_RING_IDX(sw_cons);
3116 sw_ring_prod = RX_RING_IDX(sw_prod);
3117
3118 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3119 skb = rx_buf->skb;
3120 prefetchw(skb);
3121
3122 next_rx_buf =
3123 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3124 prefetch(next_rx_buf->desc);
3125
3126 rx_buf->skb = NULL;
3127
3128 dma_addr = dma_unmap_addr(rx_buf, mapping);
3129
3130 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3131 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3132 PCI_DMA_FROMDEVICE);
3133
3134 rx_hdr = rx_buf->desc;
3135 len = rx_hdr->l2_fhdr_pkt_len;
3136 status = rx_hdr->l2_fhdr_status;
3137
3138 hdr_len = 0;
3139 if (status & L2_FHDR_STATUS_SPLIT) {
3140 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3141 pg_ring_used = 1;
3142 } else if (len > bp->rx_jumbo_thresh) {
3143 hdr_len = bp->rx_jumbo_thresh;
3144 pg_ring_used = 1;
3145 }
3146
3147 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3148 L2_FHDR_ERRORS_PHY_DECODE |
3149 L2_FHDR_ERRORS_ALIGNMENT |
3150 L2_FHDR_ERRORS_TOO_SHORT |
3151 L2_FHDR_ERRORS_GIANT_FRAME))) {
3152
3153 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3154 sw_ring_prod);
3155 if (pg_ring_used) {
3156 int pages;
3157
3158 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3159
3160 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3161 }
3162 goto next_rx;
3163 }
3164
3165 len -= 4;
3166
3167 if (len <= bp->rx_copy_thresh) {
3168 struct sk_buff *new_skb;
3169
3170 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3171 if (new_skb == NULL) {
3172 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3173 sw_ring_prod);
3174 goto next_rx;
3175 }
3176
3177 /* aligned copy */
3178 skb_copy_from_linear_data_offset(skb,
3179 BNX2_RX_OFFSET - 6,
3180 new_skb->data, len + 6);
3181 skb_reserve(new_skb, 6);
3182 skb_put(new_skb, len);
3183
3184 bnx2_reuse_rx_skb(bp, rxr, skb,
3185 sw_ring_cons, sw_ring_prod);
3186
3187 skb = new_skb;
3188 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3189 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3190 goto next_rx;
3191
3192 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3193 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3194 vtag = rx_hdr->l2_fhdr_vlan_tag;
3195 #ifdef BCM_VLAN
3196 if (bp->vlgrp)
3197 hw_vlan = 1;
3198 else
3199 #endif
3200 {
3201 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3202 __skb_push(skb, 4);
3203
3204 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3205 ve->h_vlan_proto = htons(ETH_P_8021Q);
3206 ve->h_vlan_TCI = htons(vtag);
3207 len += 4;
3208 }
3209 }
3210
3211 skb->protocol = eth_type_trans(skb, bp->dev);
3212
3213 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3214 (skb->protocol != htons(ETH_P_8021Q))) {
3215
3216 dev_kfree_skb(skb);
3217 goto next_rx;
3218
3219 }
3220
3221 skb->ip_summed = CHECKSUM_NONE;
3222 if (bp->rx_csum &&
3223 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3224 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3225
3226 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3227 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3228 skb->ip_summed = CHECKSUM_UNNECESSARY;
3229 }
3230 if ((bp->dev->features & NETIF_F_RXHASH) &&
3231 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3232 L2_FHDR_STATUS_USE_RXHASH))
3233 skb->rxhash = rx_hdr->l2_fhdr_hash;
3234
3235 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3236
3237 #ifdef BCM_VLAN
3238 if (hw_vlan)
3239 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3240 else
3241 #endif
3242 napi_gro_receive(&bnapi->napi, skb);
3243
3244 rx_pkt++;
3245
3246 next_rx:
3247 sw_cons = NEXT_RX_BD(sw_cons);
3248 sw_prod = NEXT_RX_BD(sw_prod);
3249
3250 if (rx_pkt == budget)
3251 break;
3252
3253 /* Refresh hw_cons to see if there is new work */
3254 if (sw_cons == hw_cons) {
3255 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3256 rmb();
3257 }
3258 }
3259 rxr->rx_cons = sw_cons;
3260 rxr->rx_prod = sw_prod;
3261
3262 if (pg_ring_used)
3263 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3264
3265 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3266
3267 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3268
3269 mmiowb();
3270
3271 return rx_pkt;
3272
3273 }
3274
3275 /* MSI ISR - The only difference between this and the INTx ISR
3276 * is that the MSI interrupt is always serviced.
3277 */
3278 static irqreturn_t
3279 bnx2_msi(int irq, void *dev_instance)
3280 {
3281 struct bnx2_napi *bnapi = dev_instance;
3282 struct bnx2 *bp = bnapi->bp;
3283
3284 prefetch(bnapi->status_blk.msi);
3285 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3286 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3287 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3288
3289 /* Return here if interrupt is disabled. */
3290 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3291 return IRQ_HANDLED;
3292
3293 napi_schedule(&bnapi->napi);
3294
3295 return IRQ_HANDLED;
3296 }
3297
3298 static irqreturn_t
3299 bnx2_msi_1shot(int irq, void *dev_instance)
3300 {
3301 struct bnx2_napi *bnapi = dev_instance;
3302 struct bnx2 *bp = bnapi->bp;
3303
3304 prefetch(bnapi->status_blk.msi);
3305
3306 /* Return here if interrupt is disabled. */
3307 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3308 return IRQ_HANDLED;
3309
3310 napi_schedule(&bnapi->napi);
3311
3312 return IRQ_HANDLED;
3313 }
3314
3315 static irqreturn_t
3316 bnx2_interrupt(int irq, void *dev_instance)
3317 {
3318 struct bnx2_napi *bnapi = dev_instance;
3319 struct bnx2 *bp = bnapi->bp;
3320 struct status_block *sblk = bnapi->status_blk.msi;
3321
3322 /* When using INTx, the interrupt can arrive at the CPU before the
3323 * status block write that preceded it becomes visible. Reading a
3324 * register flushes the posted status block write. When using MSI,
3325 * the MSI message will always complete after the status block
3326 * write.
3327 */
3328 if ((sblk->status_idx == bnapi->last_status_idx) &&
3329 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3330 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3331 return IRQ_NONE;
3332
3333 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3334 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3335 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3336
3337 /* Read back to deassert IRQ immediately to avoid too many
3338 * spurious interrupts.
3339 */
3340 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3341
3342 /* Return here if interrupt is shared and is disabled. */
3343 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3344 return IRQ_HANDLED;
3345
3346 if (napi_schedule_prep(&bnapi->napi)) {
3347 bnapi->last_status_idx = sblk->status_idx;
3348 __napi_schedule(&bnapi->napi);
3349 }
3350
3351 return IRQ_HANDLED;
3352 }
3353
3354 static inline int
3355 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3356 {
3357 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3358 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3359
3360 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3361 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3362 return 1;
3363 return 0;
3364 }
3365
3366 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3367 STATUS_ATTN_BITS_TIMER_ABORT)
3368
3369 static inline int
3370 bnx2_has_work(struct bnx2_napi *bnapi)
3371 {
3372 struct status_block *sblk = bnapi->status_blk.msi;
3373
3374 if (bnx2_has_fast_work(bnapi))
3375 return 1;
3376
3377 #ifdef BCM_CNIC
3378 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3379 return 1;
3380 #endif
3381
3382 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3383 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3384 return 1;
3385
3386 return 0;
3387 }
3388
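/* Work around a missed MSI interrupt: if work is pending but the status
 * index has not advanced since the last check, toggle the MSI enable bit
 * and invoke the MSI handler directly.
 */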
3389 static void
3390 bnx2_chk_missed_msi(struct bnx2 *bp)
3391 {
3392 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3393 u32 msi_ctrl;
3394
3395 if (bnx2_has_work(bnapi)) {
3396 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3397 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3398 return;
3399
3400 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3401 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3402 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3403 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3404 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3405 }
3406 }
3407
3408 bp->idle_chk_status_idx = bnapi->last_status_idx;
3409 }
3410
3411 #ifdef BCM_CNIC
3412 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3413 {
3414 struct cnic_ops *c_ops;
3415
3416 if (!bnapi->cnic_present)
3417 return;
3418
3419 rcu_read_lock();
3420 c_ops = rcu_dereference(bp->cnic_ops);
3421 if (c_ops)
3422 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3423 bnapi->status_blk.msi);
3424 rcu_read_unlock();
3425 }
3426 #endif
3427
3428 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3429 {
3430 struct status_block *sblk = bnapi->status_blk.msi;
3431 u32 status_attn_bits = sblk->status_attn_bits;
3432 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3433
3434 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3435 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3436
3437 bnx2_phy_int(bp, bnapi);
3438
3439 /* This is needed to take care of transient status
3440 * during link changes.
3441 */
3442 REG_WR(bp, BNX2_HC_COMMAND,
3443 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3444 REG_RD(bp, BNX2_HC_COMMAND);
3445 }
3446 }
3447
3448 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3449 int work_done, int budget)
3450 {
3451 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3452 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3453
3454 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3455 bnx2_tx_int(bp, bnapi, 0);
3456
3457 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3458 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3459
3460 return work_done;
3461 }
3462
3463 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3464 {
3465 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3466 struct bnx2 *bp = bnapi->bp;
3467 int work_done = 0;
3468 struct status_block_msix *sblk = bnapi->status_blk.msix;
3469
3470 while (1) {
3471 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3472 if (unlikely(work_done >= budget))
3473 break;
3474
3475 bnapi->last_status_idx = sblk->status_idx;
3476 /* status idx must be read before checking for more work. */
3477 rmb();
3478 if (likely(!bnx2_has_fast_work(bnapi))) {
3479
3480 napi_complete(napi);
3481 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3482 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3483 bnapi->last_status_idx);
3484 break;
3485 }
3486 }
3487 return work_done;
3488 }
3489
3490 static int bnx2_poll(struct napi_struct *napi, int budget)
3491 {
3492 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3493 struct bnx2 *bp = bnapi->bp;
3494 int work_done = 0;
3495 struct status_block *sblk = bnapi->status_blk.msi;
3496
3497 while (1) {
3498 bnx2_poll_link(bp, bnapi);
3499
3500 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3501
3502 #ifdef BCM_CNIC
3503 bnx2_poll_cnic(bp, bnapi);
3504 #endif
3505
3506 /* bnapi->last_status_idx is used below to tell the hw how
3507 * much work has been processed, so we must read it before
3508 * checking for more work.
3509 */
3510 bnapi->last_status_idx = sblk->status_idx;
3511
3512 if (unlikely(work_done >= budget))
3513 break;
3514
3515 rmb();
3516 if (likely(!bnx2_has_work(bnapi))) {
3517 napi_complete(napi);
3518 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3519 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3520 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521 bnapi->last_status_idx);
3522 break;
3523 }
3524 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3525 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3526 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3527 bnapi->last_status_idx);
3528
3529 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3530 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3531 bnapi->last_status_idx);
3532 break;
3533 }
3534 }
3535
3536 return work_done;
3537 }
3538
3539 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3540 * from set_multicast.
3541 */
3542 static void
3543 bnx2_set_rx_mode(struct net_device *dev)
3544 {
3545 struct bnx2 *bp = netdev_priv(dev);
3546 u32 rx_mode, sort_mode;
3547 struct netdev_hw_addr *ha;
3548 int i;
3549
3550 if (!netif_running(dev))
3551 return;
3552
3553 spin_lock_bh(&bp->phy_lock);
3554
3555 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3556 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3557 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3558 #ifdef BCM_VLAN
3559 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3560 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3561 #else
3562 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3563 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3564 #endif
3565 if (dev->flags & IFF_PROMISC) {
3566 /* Promiscuous mode. */
3567 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3568 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3569 BNX2_RPM_SORT_USER0_PROM_VLAN;
3570 }
3571 else if (dev->flags & IFF_ALLMULTI) {
3572 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3573 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3574 0xffffffff);
3575 }
3576 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3577 }
3578 else {
3579 /* Accept one or more multicast(s). */
3580 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3581 u32 regidx;
3582 u32 bit;
3583 u32 crc;
3584
3585 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3586
3587 netdev_for_each_mc_addr(ha, dev) {
3588 crc = ether_crc_le(ETH_ALEN, ha->addr);
3589 bit = crc & 0xff;
3590 regidx = (bit & 0xe0) >> 5;
3591 bit &= 0x1f;
3592 mc_filter[regidx] |= (1 << bit);
3593 }
3594
3595 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3596 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3597 mc_filter[i]);
3598 }
3599
3600 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3601 }
3602
3603 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3604 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3605 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3606 BNX2_RPM_SORT_USER0_PROM_VLAN;
3607 } else if (!(dev->flags & IFF_PROMISC)) {
3608 /* Add all entries to the match filter list */
3609 i = 0;
3610 netdev_for_each_uc_addr(ha, dev) {
3611 bnx2_set_mac_addr(bp, ha->addr,
3612 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3613 sort_mode |= (1 <<
3614 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3615 i++;
3616 }
3617
3618 }
3619
3620 if (rx_mode != bp->rx_mode) {
3621 bp->rx_mode = rx_mode;
3622 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3623 }
3624
3625 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3626 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3627 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3628
3629 spin_unlock_bh(&bp->phy_lock);
3630 }
3631
3632 static int __devinit
3633 check_fw_section(const struct firmware *fw,
3634 const struct bnx2_fw_file_section *section,
3635 u32 alignment, bool non_empty)
3636 {
3637 u32 offset = be32_to_cpu(section->offset);
3638 u32 len = be32_to_cpu(section->len);
3639
3640 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3641 return -EINVAL;
3642 if ((non_empty && len == 0) || len > fw->size - offset ||
3643 len & (alignment - 1))
3644 return -EINVAL;
3645 return 0;
3646 }
3647
3648 static int __devinit
3649 check_mips_fw_entry(const struct firmware *fw,
3650 const struct bnx2_mips_fw_file_entry *entry)
3651 {
3652 if (check_fw_section(fw, &entry->text, 4, true) ||
3653 check_fw_section(fw, &entry->data, 4, false) ||
3654 check_fw_section(fw, &entry->rodata, 4, false))
3655 return -EINVAL;
3656 return 0;
3657 }
3658
3659 static int __devinit
3660 bnx2_request_firmware(struct bnx2 *bp)
3661 {
3662 const char *mips_fw_file, *rv2p_fw_file;
3663 const struct bnx2_mips_fw_file *mips_fw;
3664 const struct bnx2_rv2p_fw_file *rv2p_fw;
3665 int rc;
3666
3667 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3668 mips_fw_file = FW_MIPS_FILE_09;
3669 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3670 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3671 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3672 else
3673 rv2p_fw_file = FW_RV2P_FILE_09;
3674 } else {
3675 mips_fw_file = FW_MIPS_FILE_06;
3676 rv2p_fw_file = FW_RV2P_FILE_06;
3677 }
3678
3679 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3680 if (rc) {
3681 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3682 return rc;
3683 }
3684
3685 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3686 if (rc) {
3687 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3688 return rc;
3689 }
3690 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3691 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3692 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3693 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3694 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3695 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3696 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3697 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3698 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3699 return -EINVAL;
3700 }
3701 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3702 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3703 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3704 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3705 return -EINVAL;
3706 }
3707
3708 return 0;
3709 }
3710
3711 static u32
3712 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3713 {
3714 switch (idx) {
3715 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3716 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3717 rv2p_code |= RV2P_BD_PAGE_SIZE;
3718 break;
3719 }
3720 return rv2p_code;
3721 }
3722
3723 static int
3724 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3725 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3726 {
3727 u32 rv2p_code_len, file_offset;
3728 __be32 *rv2p_code;
3729 int i;
3730 u32 val, cmd, addr;
3731
3732 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3733 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3734
3735 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3736
3737 if (rv2p_proc == RV2P_PROC1) {
3738 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3739 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3740 } else {
3741 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3742 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3743 }
3744
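/* Each RV2P instruction is 8 bytes: write the high and low words, then
 * latch the instruction at index i/8.
 */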
3745 for (i = 0; i < rv2p_code_len; i += 8) {
3746 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3747 rv2p_code++;
3748 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3749 rv2p_code++;
3750
3751 val = (i / 8) | cmd;
3752 REG_WR(bp, addr, val);
3753 }
3754
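/* Apply the firmware fixup table: each non-zero entry names an
 * instruction whose low word is patched (e.g. with the BD page size)
 * and rewritten in place.
 */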
3755 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3756 for (i = 0; i < 8; i++) {
3757 u32 loc, code;
3758
3759 loc = be32_to_cpu(fw_entry->fixup[i]);
3760 if (loc && ((loc * 4) < rv2p_code_len)) {
3761 code = be32_to_cpu(*(rv2p_code + loc - 1));
3762 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3763 code = be32_to_cpu(*(rv2p_code + loc));
3764 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3765 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3766
3767 val = (loc / 2) | cmd;
3768 REG_WR(bp, addr, val);
3769 }
3770 }
3771
3772 /* Reset the processor, un-stall is done later. */
3773 if (rv2p_proc == RV2P_PROC1) {
3774 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3775 }
3776 else {
3777 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3778 }
3779
3780 return 0;
3781 }
3782
3783 static int
3784 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3785 const struct bnx2_mips_fw_file_entry *fw_entry)
3786 {
3787 u32 addr, len, file_offset;
3788 __be32 *data;
3789 u32 offset;
3790 u32 val;
3791
3792 /* Halt the CPU. */
3793 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3794 val |= cpu_reg->mode_value_halt;
3795 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3796 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3797
3798 /* Load the Text area. */
3799 addr = be32_to_cpu(fw_entry->text.addr);
3800 len = be32_to_cpu(fw_entry->text.len);
3801 file_offset = be32_to_cpu(fw_entry->text.offset);
3802 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3803
3804 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3805 if (len) {
3806 int j;
3807
3808 for (j = 0; j < (len / 4); j++, offset += 4)
3809 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3810 }
3811
3812 /* Load the Data area. */
3813 addr = be32_to_cpu(fw_entry->data.addr);
3814 len = be32_to_cpu(fw_entry->data.len);
3815 file_offset = be32_to_cpu(fw_entry->data.offset);
3816 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3817
3818 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3819 if (len) {
3820 int j;
3821
3822 for (j = 0; j < (len / 4); j++, offset += 4)
3823 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3824 }
3825
3826 /* Load the Read-Only area. */
3827 addr = be32_to_cpu(fw_entry->rodata.addr);
3828 len = be32_to_cpu(fw_entry->rodata.len);
3829 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3830 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3831
3832 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3833 if (len) {
3834 int j;
3835
3836 for (j = 0; j < (len / 4); j++, offset += 4)
3837 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3838 }
3839
3840 /* Clear the pre-fetch instruction. */
3841 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3842
3843 val = be32_to_cpu(fw_entry->start_addr);
3844 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3845
3846 /* Start the CPU. */
3847 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3848 val &= ~cpu_reg->mode_value_halt;
3849 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3850 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3851
3852 return 0;
3853 }
3854
3855 static int
3856 bnx2_init_cpus(struct bnx2 *bp)
3857 {
3858 const struct bnx2_mips_fw_file *mips_fw =
3859 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3860 const struct bnx2_rv2p_fw_file *rv2p_fw =
3861 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3862 int rc;
3863
3864 /* Initialize the RV2P processor. */
3865 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3866 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3867
3868 /* Initialize the RX Processor. */
3869 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3870 if (rc)
3871 goto init_cpu_err;
3872
3873 /* Initialize the TX Processor. */
3874 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3875 if (rc)
3876 goto init_cpu_err;
3877
3878 /* Initialize the TX Patch-up Processor. */
3879 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3880 if (rc)
3881 goto init_cpu_err;
3882
3883 /* Initialize the Completion Processor. */
3884 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3885 if (rc)
3886 goto init_cpu_err;
3887
3888 /* Initialize the Command Processor. */
3889 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3890
3891 init_cpu_err:
3892 return rc;
3893 }
3894
3895 static int
3896 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3897 {
3898 u16 pmcsr;
3899
3900 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3901
3902 switch (state) {
3903 case PCI_D0: {
3904 u32 val;
3905
3906 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3907 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3908 PCI_PM_CTRL_PME_STATUS);
3909
3910 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3911 /* delay required during transition out of D3hot */
3912 msleep(20);
3913
3914 val = REG_RD(bp, BNX2_EMAC_MODE);
3915 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3916 val &= ~BNX2_EMAC_MODE_MPKT;
3917 REG_WR(bp, BNX2_EMAC_MODE, val);
3918
3919 val = REG_RD(bp, BNX2_RPM_CONFIG);
3920 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3921 REG_WR(bp, BNX2_RPM_CONFIG, val);
3922 break;
3923 }
3924 case PCI_D3hot: {
3925 int i;
3926 u32 val, wol_msg;
3927
3928 if (bp->wol) {
3929 u32 advertising;
3930 u8 autoneg;
3931
3932 autoneg = bp->autoneg;
3933 advertising = bp->advertising;
3934
3935 if (bp->phy_port == PORT_TP) {
3936 bp->autoneg = AUTONEG_SPEED;
3937 bp->advertising = ADVERTISED_10baseT_Half |
3938 ADVERTISED_10baseT_Full |
3939 ADVERTISED_100baseT_Half |
3940 ADVERTISED_100baseT_Full |
3941 ADVERTISED_Autoneg;
3942 }
3943
3944 spin_lock_bh(&bp->phy_lock);
3945 bnx2_setup_phy(bp, bp->phy_port);
3946 spin_unlock_bh(&bp->phy_lock);
3947
3948 bp->autoneg = autoneg;
3949 bp->advertising = advertising;
3950
3951 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3952
3953 val = REG_RD(bp, BNX2_EMAC_MODE);
3954
3955 /* Enable port mode. */
3956 val &= ~BNX2_EMAC_MODE_PORT;
3957 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3958 BNX2_EMAC_MODE_ACPI_RCVD |
3959 BNX2_EMAC_MODE_MPKT;
3960 if (bp->phy_port == PORT_TP)
3961 val |= BNX2_EMAC_MODE_PORT_MII;
3962 else {
3963 val |= BNX2_EMAC_MODE_PORT_GMII;
3964 if (bp->line_speed == SPEED_2500)
3965 val |= BNX2_EMAC_MODE_25G_MODE;
3966 }
3967
3968 REG_WR(bp, BNX2_EMAC_MODE, val);
3969
3970 /* receive all multicast */
3971 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3972 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3973 0xffffffff);
3974 }
3975 REG_WR(bp, BNX2_EMAC_RX_MODE,
3976 BNX2_EMAC_RX_MODE_SORT_MODE);
3977
3978 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3979 BNX2_RPM_SORT_USER0_MC_EN;
3980 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3981 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3982 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3983 BNX2_RPM_SORT_USER0_ENA);
3984
3985 /* Need to enable EMAC and RPM for WOL. */
3986 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3987 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3988 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3989 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3990
3991 val = REG_RD(bp, BNX2_RPM_CONFIG);
3992 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3993 REG_WR(bp, BNX2_RPM_CONFIG, val);
3994
3995 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3996 }
3997 else {
3998 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3999 }
4000
4001 if (!(bp->flags & BNX2_FLAG_NO_WOL))
4002 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4003 1, 0);
4004
4005 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4006 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4007 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4008
4009 if (bp->wol)
4010 pmcsr |= 3;
4011 }
4012 else {
4013 pmcsr |= 3;
4014 }
4015 if (bp->wol) {
4016 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4017 }
4018 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4019 pmcsr);
4020
4021 /* No more memory access after this point until
4022 * device is brought back to D0.
4023 */
4024 udelay(50);
4025 break;
4026 }
4027 default:
4028 return -EINVAL;
4029 }
4030 return 0;
4031 }
4032
4033 static int
4034 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4035 {
4036 u32 val;
4037 int j;
4038
4039 /* Request access to the flash interface. */
4040 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4041 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4042 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4043 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4044 break;
4045
4046 udelay(5);
4047 }
4048
4049 if (j >= NVRAM_TIMEOUT_COUNT)
4050 return -EBUSY;
4051
4052 return 0;
4053 }
4054
4055 static int
4056 bnx2_release_nvram_lock(struct bnx2 *bp)
4057 {
4058 int j;
4059 u32 val;
4060
4061 /* Relinquish nvram interface. */
4062 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4063
4064 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4065 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4066 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4067 break;
4068
4069 udelay(5);
4070 }
4071
4072 if (j >= NVRAM_TIMEOUT_COUNT)
4073 return -EBUSY;
4074
4075 return 0;
4076 }
4077
4078
4079 static int
4080 bnx2_enable_nvram_write(struct bnx2 *bp)
4081 {
4082 u32 val;
4083
4084 val = REG_RD(bp, BNX2_MISC_CFG);
4085 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4086
4087 if (bp->flash_info->flags & BNX2_NV_WREN) {
4088 int j;
4089
4090 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4091 REG_WR(bp, BNX2_NVM_COMMAND,
4092 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4093
4094 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4095 udelay(5);
4096
4097 val = REG_RD(bp, BNX2_NVM_COMMAND);
4098 if (val & BNX2_NVM_COMMAND_DONE)
4099 break;
4100 }
4101
4102 if (j >= NVRAM_TIMEOUT_COUNT)
4103 return -EBUSY;
4104 }
4105 return 0;
4106 }
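/*
 * The NVRAM command paths in this file (write-enable above, erase, read
 * and write below) all wait for BNX2_NVM_COMMAND_DONE with the same
 * udelay(5) / NVRAM_TIMEOUT_COUNT polling loop.  A minimal sketch of
 * that shared wait is shown here for illustration only; this helper is
 * hypothetical and is not part of the driver.
 */
#if 0
static int bnx2_nvram_wait_done(struct bnx2 *bp)
{
	int j;

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);
		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			return 0;
	}
	return -EBUSY;
}
#endif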
4107
4108 static void
4109 bnx2_disable_nvram_write(struct bnx2 *bp)
4110 {
4111 u32 val;
4112
4113 val = REG_RD(bp, BNX2_MISC_CFG);
4114 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4115 }
4116
4117
4118 static void
4119 bnx2_enable_nvram_access(struct bnx2 *bp)
4120 {
4121 u32 val;
4122
4123 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4124 /* Enable both bits, even on read. */
4125 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4126 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4127 }
4128
4129 static void
4130 bnx2_disable_nvram_access(struct bnx2 *bp)
4131 {
4132 u32 val;
4133
4134 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4135 /* Disable both bits, even after read. */
4136 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4137 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4138 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4139 }
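/*
 * bnx2_nvram_read() and bnx2_nvram_write() below always bracket NVRAM
 * commands with the lock and access-enable helpers above.  A sketch of
 * that bracketing, for illustration only (hypothetical helper, not part
 * of the driver):
 */
#if 0
static int bnx2_nvram_bracketed_op(struct bnx2 *bp)
{
	int rc;

	rc = bnx2_acquire_nvram_lock(bp);
	if (rc != 0)
		return rc;
	bnx2_enable_nvram_access(bp);

	/* ... issue NVRAM read/write/erase commands here ... */

	bnx2_disable_nvram_access(bp);
	bnx2_release_nvram_lock(bp);
	return 0;
}
#endif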
4140
4141 static int
4142 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4143 {
4144 u32 cmd;
4145 int j;
4146
4147 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4148 /* Buffered flash, no erase needed */
4149 return 0;
4150
4151 /* Build an erase command */
4152 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4153 BNX2_NVM_COMMAND_DOIT;
4154
4155 /* Need to clear DONE bit separately. */
4156 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4157
4158 /* Address of the NVRAM page to erase. */
4159 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4160
4161 /* Issue an erase command. */
4162 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4163
4164 /* Wait for completion. */
4165 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4166 u32 val;
4167
4168 udelay(5);
4169
4170 val = REG_RD(bp, BNX2_NVM_COMMAND);
4171 if (val & BNX2_NVM_COMMAND_DONE)
4172 break;
4173 }
4174
4175 if (j >= NVRAM_TIMEOUT_COUNT)
4176 return -EBUSY;
4177
4178 return 0;
4179 }
4180
4181 static int
4182 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4183 {
4184 u32 cmd;
4185 int j;
4186
4187 /* Build the command word. */
4188 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4189
4190 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4191 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4192 offset = ((offset / bp->flash_info->page_size) <<
4193 bp->flash_info->page_bits) +
4194 (offset % bp->flash_info->page_size);
4195 }
4196
4197 /* Need to clear DONE bit separately. */
4198 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4199
4200 /* Address of the NVRAM to read from. */
4201 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4202
4203 /* Issue a read command. */
4204 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4205
4206 /* Wait for completion. */
4207 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4208 u32 val;
4209
4210 udelay(5);
4211
4212 val = REG_RD(bp, BNX2_NVM_COMMAND);
4213 if (val & BNX2_NVM_COMMAND_DONE) {
4214 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4215 memcpy(ret_val, &v, 4);
4216 break;
4217 }
4218 }
4219 if (j >= NVRAM_TIMEOUT_COUNT)
4220 return -EBUSY;
4221
4222 return 0;
4223 }
4224
4225
4226 static int
4227 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4228 {
4229 u32 cmd;
4230 __be32 val32;
4231 int j;
4232
4233 /* Build the command word. */
4234 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4235
4236 /* Calculate the offset within a buffered flash; not needed for the 5709. */
4237 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238 offset = ((offset / bp->flash_info->page_size) <<
4239 bp->flash_info->page_bits) +
4240 (offset % bp->flash_info->page_size);
4241 }
4242
4243 /* Need to clear DONE bit separately. */
4244 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4245
4246 memcpy(&val32, val, 4);
4247
4248 /* Write the data. */
4249 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4250
4251 /* Address of the NVRAM to write to. */
4252 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4253
4254 /* Issue the write command. */
4255 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4256
4257 /* Wait for completion. */
4258 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4259 udelay(5);
4260
4261 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4262 break;
4263 }
4264 if (j >= NVRAM_TIMEOUT_COUNT)
4265 return -EBUSY;
4266
4267 return 0;
4268 }
4269
4270 static int
4271 bnx2_init_nvram(struct bnx2 *bp)
4272 {
4273 u32 val;
4274 int j, entry_count, rc = 0;
4275 const struct flash_spec *flash;
4276
4277 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4278 bp->flash_info = &flash_5709;
4279 goto get_flash_size;
4280 }
4281
4282 /* Determine the selected interface. */
4283 val = REG_RD(bp, BNX2_NVM_CFG1);
4284
4285 entry_count = ARRAY_SIZE(flash_table);
4286
4287 if (val & 0x40000000) {
4288
4289 /* Flash interface has been reconfigured */
4290 for (j = 0, flash = &flash_table[0]; j < entry_count;
4291 j++, flash++) {
4292 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4293 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4294 bp->flash_info = flash;
4295 break;
4296 }
4297 }
4298 }
4299 else {
4300 u32 mask;
4301 /* Not yet reconfigured */
4302
4303 if (val & (1 << 23))
4304 mask = FLASH_BACKUP_STRAP_MASK;
4305 else
4306 mask = FLASH_STRAP_MASK;
4307
4308 for (j = 0, flash = &flash_table[0]; j < entry_count;
4309 j++, flash++) {
4310
4311 if ((val & mask) == (flash->strapping & mask)) {
4312 bp->flash_info = flash;
4313
4314 /* Request access to the flash interface. */
4315 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4316 return rc;
4317
4318 /* Enable access to flash interface */
4319 bnx2_enable_nvram_access(bp);
4320
4321 /* Reconfigure the flash interface */
4322 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4323 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4324 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4325 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4326
4327 /* Disable access to flash interface */
4328 bnx2_disable_nvram_access(bp);
4329 bnx2_release_nvram_lock(bp);
4330
4331 break;
4332 }
4333 }
4334 } /* if (val & 0x40000000) */
4335
4336 if (j == entry_count) {
4337 bp->flash_info = NULL;
4338 pr_alert("Unknown flash/EEPROM type\n");
4339 return -ENODEV;
4340 }
4341
4342 get_flash_size:
4343 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4344 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4345 if (val)
4346 bp->flash_size = val;
4347 else
4348 bp->flash_size = bp->flash_info->total_size;
4349
4350 return rc;
4351 }
4352
4353 static int
4354 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4355 int buf_size)
4356 {
4357 int rc = 0;
4358 u32 cmd_flags, offset32, len32, extra;
4359
4360 if (buf_size == 0)
4361 return 0;
4362
4363 /* Request access to the flash interface. */
4364 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4365 return rc;
4366
4367 /* Enable access to flash interface */
4368 bnx2_enable_nvram_access(bp);
4369
4370 len32 = buf_size;
4371 offset32 = offset;
4372 extra = 0;
4373
4374 cmd_flags = 0;
4375
4376 if (offset32 & 3) {
4377 u8 buf[4];
4378 u32 pre_len;
4379
4380 offset32 &= ~3;
4381 pre_len = 4 - (offset & 3);
4382
4383 if (pre_len >= len32) {
4384 pre_len = len32;
4385 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4386 BNX2_NVM_COMMAND_LAST;
4387 }
4388 else {
4389 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4390 }
4391
4392 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4393
4394 if (rc)
4395 return rc;
4396
4397 memcpy(ret_buf, buf + (offset & 3), pre_len);
4398
4399 offset32 += 4;
4400 ret_buf += pre_len;
4401 len32 -= pre_len;
4402 }
4403 if (len32 & 3) {
4404 extra = 4 - (len32 & 3);
4405 len32 = (len32 + 4) & ~3;
4406 }
4407
4408 if (len32 == 4) {
4409 u8 buf[4];
4410
4411 if (cmd_flags)
4412 cmd_flags = BNX2_NVM_COMMAND_LAST;
4413 else
4414 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4415 BNX2_NVM_COMMAND_LAST;
4416
4417 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4418
4419 memcpy(ret_buf, buf, 4 - extra);
4420 }
4421 else if (len32 > 0) {
4422 u8 buf[4];
4423
4424 /* Read the first word. */
4425 if (cmd_flags)
4426 cmd_flags = 0;
4427 else
4428 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4429
4430 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4431
4432 /* Advance to the next dword. */
4433 offset32 += 4;
4434 ret_buf += 4;
4435 len32 -= 4;
4436
4437 while (len32 > 4 && rc == 0) {
4438 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4439
4440 /* Advance to the next dword. */
4441 offset32 += 4;
4442 ret_buf += 4;
4443 len32 -= 4;
4444 }
4445
4446 if (rc)
4447 return rc;
4448
4449 cmd_flags = BNX2_NVM_COMMAND_LAST;
4450 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4451
4452 memcpy(ret_buf, buf, 4 - extra);
4453 }
4454
4455 /* Disable access to flash interface */
4456 bnx2_disable_nvram_access(bp);
4457
4458 bnx2_release_nvram_lock(bp);
4459
4460 return rc;
4461 }
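/*
 * Illustrative caller sketch (hypothetical, not part of the driver):
 * reading the 4-byte NVRAM magic at offset 0 with bnx2_nvram_read(),
 * mirroring what bnx2_test_nvram() does later in this file.
 */
#if 0
static int bnx2_read_nvram_magic(struct bnx2 *bp, u32 *magic)
{
	__be32 buf;
	int rc;

	rc = bnx2_nvram_read(bp, 0, (u8 *) &buf, 4);
	if (rc == 0)
		*magic = be32_to_cpu(buf);
	return rc;
}
#endif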
4462
4463 static int
4464 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4465 int buf_size)
4466 {
4467 u32 written, offset32, len32;
4468 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4469 int rc = 0;
4470 int align_start, align_end;
4471
4472 buf = data_buf;
4473 offset32 = offset;
4474 len32 = buf_size;
4475 align_start = align_end = 0;
4476
4477 if ((align_start = (offset32 & 3))) {
4478 offset32 &= ~3;
4479 len32 += align_start;
4480 if (len32 < 4)
4481 len32 = 4;
4482 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4483 return rc;
4484 }
4485
4486 if (len32 & 3) {
4487 align_end = 4 - (len32 & 3);
4488 len32 += align_end;
4489 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4490 return rc;
4491 }
4492
4493 if (align_start || align_end) {
4494 align_buf = kmalloc(len32, GFP_KERNEL);
4495 if (align_buf == NULL)
4496 return -ENOMEM;
4497 if (align_start) {
4498 memcpy(align_buf, start, 4);
4499 }
4500 if (align_end) {
4501 memcpy(align_buf + len32 - 4, end, 4);
4502 }
4503 memcpy(align_buf + align_start, data_buf, buf_size);
4504 buf = align_buf;
4505 }
4506
4507 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4508 flash_buffer = kmalloc(264, GFP_KERNEL);
4509 if (flash_buffer == NULL) {
4510 rc = -ENOMEM;
4511 goto nvram_write_end;
4512 }
4513 }
4514
4515 written = 0;
4516 while ((written < len32) && (rc == 0)) {
4517 u32 page_start, page_end, data_start, data_end;
4518 u32 addr, cmd_flags;
4519 int i;
4520
4521 /* Find the page_start addr */
4522 page_start = offset32 + written;
4523 page_start -= (page_start % bp->flash_info->page_size);
4524 /* Find the page_end addr */
4525 page_end = page_start + bp->flash_info->page_size;
4526 /* Find the data_start addr */
4527 data_start = (written == 0) ? offset32 : page_start;
4528 /* Find the data_end addr */
4529 data_end = (page_end > offset32 + len32) ?
4530 (offset32 + len32) : page_end;
4531
4532 /* Request access to the flash interface. */
4533 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4534 goto nvram_write_end;
4535
4536 /* Enable access to flash interface */
4537 bnx2_enable_nvram_access(bp);
4538
4539 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4540 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4541 int j;
4542
4543 /* Read the whole page into the buffer
4544 * (non-buffered flash only) */
4545 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4546 if (j == (bp->flash_info->page_size - 4)) {
4547 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4548 }
4549 rc = bnx2_nvram_read_dword(bp,
4550 page_start + j,
4551 &flash_buffer[j],
4552 cmd_flags);
4553
4554 if (rc)
4555 goto nvram_write_end;
4556
4557 cmd_flags = 0;
4558 }
4559 }
4560
4561 /* Enable writes to flash interface (unlock write-protect) */
4562 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4563 goto nvram_write_end;
4564
4565 /* Loop to write back the buffer data from page_start to
4566 * data_start */
4567 i = 0;
4568 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4569 /* Erase the page */
4570 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4571 goto nvram_write_end;
4572
4573 /* Re-enable writes for the actual write */
4574 bnx2_enable_nvram_write(bp);
4575
4576 for (addr = page_start; addr < data_start;
4577 addr += 4, i += 4) {
4578
4579 rc = bnx2_nvram_write_dword(bp, addr,
4580 &flash_buffer[i], cmd_flags);
4581
4582 if (rc != 0)
4583 goto nvram_write_end;
4584
4585 cmd_flags = 0;
4586 }
4587 }
4588
4589 /* Loop to write the new data from data_start to data_end */
4590 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4591 if ((addr == page_end - 4) ||
4592 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4593 (addr == data_end - 4))) {
4594
4595 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4596 }
4597 rc = bnx2_nvram_write_dword(bp, addr, buf,
4598 cmd_flags);
4599
4600 if (rc != 0)
4601 goto nvram_write_end;
4602
4603 cmd_flags = 0;
4604 buf += 4;
4605 }
4606
4607 /* Loop to write back the buffer data from data_end
4608 * to page_end */
4609 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4610 for (addr = data_end; addr < page_end;
4611 addr += 4, i += 4) {
4612
4613 if (addr == page_end-4) {
4614 cmd_flags = BNX2_NVM_COMMAND_LAST;
4615 }
4616 rc = bnx2_nvram_write_dword(bp, addr,
4617 &flash_buffer[i], cmd_flags);
4618
4619 if (rc != 0)
4620 goto nvram_write_end;
4621
4622 cmd_flags = 0;
4623 }
4624 }
4625
4626 /* Disable writes to flash interface (lock write-protect) */
4627 bnx2_disable_nvram_write(bp);
4628
4629 /* Disable access to flash interface */
4630 bnx2_disable_nvram_access(bp);
4631 bnx2_release_nvram_lock(bp);
4632
4633 /* Increment written */
4634 written += data_end - data_start;
4635 }
4636
4637 nvram_write_end:
4638 kfree(flash_buffer);
4639 kfree(align_buf);
4640 return rc;
4641 }
4642
4643 static void
4644 bnx2_init_fw_cap(struct bnx2 *bp)
4645 {
4646 u32 val, sig = 0;
4647
4648 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4649 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4650
4651 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4652 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4653
4654 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4655 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4656 return;
4657
4658 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4659 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4660 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4661 }
4662
4663 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4664 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4665 u32 link;
4666
4667 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4668
4669 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4670 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4671 bp->phy_port = PORT_FIBRE;
4672 else
4673 bp->phy_port = PORT_TP;
4674
4675 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4676 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4677 }
4678
4679 if (netif_running(bp->dev) && sig)
4680 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4681 }
4682
4683 static void
4684 bnx2_setup_msix_tbl(struct bnx2 *bp)
4685 {
4686 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4687
4688 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4689 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4690 }
4691
4692 static int
4693 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4694 {
4695 u32 val;
4696 int i, rc = 0;
4697 u8 old_port;
4698
4699 /* Wait for the current PCI transaction to complete before
4700 * issuing a reset. */
4701 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4702 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4703 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4704 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4705 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4706 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4707 udelay(5);
4708
4709 /* Wait for the firmware to tell us it is ok to issue a reset. */
4710 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4711
4712 /* Deposit a driver reset signature so the firmware knows that
4713 * this is a soft reset. */
4714 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4715 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4716
4717 /* Do a dummy read to force the chip to complete all current transactions
4718 * before we issue a reset. */
4719 val = REG_RD(bp, BNX2_MISC_ID);
4720
4721 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4722 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4723 REG_RD(bp, BNX2_MISC_COMMAND);
4724 udelay(5);
4725
4726 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4727 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4728
4729 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4730
4731 } else {
4732 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4733 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4734 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4735
4736 /* Chip reset. */
4737 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4738
4739 /* Reading back any register after chip reset will hang the
4740 * bus on 5706 A0 and A1. The msleep below provides plenty
4741 * of margin for write posting.
4742 */
4743 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4744 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4745 msleep(20);
4746
4747 /* Reset takes approximately 30 usec. */
4748 for (i = 0; i < 10; i++) {
4749 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4750 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4751 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4752 break;
4753 udelay(10);
4754 }
4755
4756 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4757 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4758 pr_err("Chip reset did not complete\n");
4759 return -EBUSY;
4760 }
4761 }
4762
4763 /* Make sure byte swapping is properly configured. */
4764 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4765 if (val != 0x01020304) {
4766 pr_err("Chip not in correct endian mode\n");
4767 return -ENODEV;
4768 }
4769
4770 /* Wait for the firmware to finish its initialization. */
4771 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4772 if (rc)
4773 return rc;
4774
4775 spin_lock_bh(&bp->phy_lock);
4776 old_port = bp->phy_port;
4777 bnx2_init_fw_cap(bp);
4778 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4779 old_port != bp->phy_port)
4780 bnx2_set_default_remote_link(bp);
4781 spin_unlock_bh(&bp->phy_lock);
4782
4783 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4784 /* Adjust the voltage regulator to two steps lower. The default
4785 * of this register is 0x0000000e. */
4786 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4787
4788 /* Remove bad rbuf memory from the free pool. */
4789 rc = bnx2_alloc_bad_rbuf(bp);
4790 }
4791
4792 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4793 bnx2_setup_msix_tbl(bp);
4794 /* Prevent MSI-X table reads and writes from timing out */
4795 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4796 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4797 }
4798
4799 return rc;
4800 }
4801
4802 static int
4803 bnx2_init_chip(struct bnx2 *bp)
4804 {
4805 u32 val, mtu;
4806 int rc, i;
4807
4808 /* Make sure the interrupt is not active. */
4809 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4810
4811 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4812 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4813 #ifdef __BIG_ENDIAN
4814 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4815 #endif
4816 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4817 DMA_READ_CHANS << 12 |
4818 DMA_WRITE_CHANS << 16;
4819
4820 val |= (0x2 << 20) | (1 << 11);
4821
4822 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4823 val |= (1 << 23);
4824
4825 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4826 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4827 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4828
4829 REG_WR(bp, BNX2_DMA_CONFIG, val);
4830
4831 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4832 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4833 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4834 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4835 }
4836
4837 if (bp->flags & BNX2_FLAG_PCIX) {
4838 u16 val16;
4839
4840 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4841 &val16);
4842 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4843 val16 & ~PCI_X_CMD_ERO);
4844 }
4845
4846 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4847 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4848 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4849 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4850
4851 /* Initialize context mapping and zero out the quick contexts. The
4852 * context block must have already been enabled. */
4853 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4854 rc = bnx2_init_5709_context(bp);
4855 if (rc)
4856 return rc;
4857 } else
4858 bnx2_init_context(bp);
4859
4860 if ((rc = bnx2_init_cpus(bp)) != 0)
4861 return rc;
4862
4863 bnx2_init_nvram(bp);
4864
4865 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4866
4867 val = REG_RD(bp, BNX2_MQ_CONFIG);
4868 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4869 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4870 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4871 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4872 if (CHIP_REV(bp) == CHIP_REV_Ax)
4873 val |= BNX2_MQ_CONFIG_HALT_DIS;
4874 }
4875
4876 REG_WR(bp, BNX2_MQ_CONFIG, val);
4877
4878 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4879 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4880 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4881
4882 val = (BCM_PAGE_BITS - 8) << 24;
4883 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4884
4885 /* Configure page size. */
4886 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4887 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4888 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4889 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4890
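/* Seed the EMAC backoff random number generator from the MAC address. */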
4891 val = bp->mac_addr[0] +
4892 (bp->mac_addr[1] << 8) +
4893 (bp->mac_addr[2] << 16) +
4894 bp->mac_addr[3] +
4895 (bp->mac_addr[4] << 8) +
4896 (bp->mac_addr[5] << 16);
4897 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4898
4899 /* Program the MTU. Also include 4 bytes for CRC32. */
4900 mtu = bp->dev->mtu;
4901 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4902 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4903 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4904 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4905
4906 if (mtu < 1500)
4907 mtu = 1500;
4908
4909 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4910 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4911 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4912
4913 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4914 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4915 bp->bnx2_napi[i].last_status_idx = 0;
4916
4917 bp->idle_chk_status_idx = 0xffff;
4918
4919 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4920
4921 /* Set up how to generate a link change interrupt. */
4922 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4923
4924 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4925 (u64) bp->status_blk_mapping & 0xffffffff);
4926 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4927
4928 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4929 (u64) bp->stats_blk_mapping & 0xffffffff);
4930 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4931 (u64) bp->stats_blk_mapping >> 32);
4932
4933 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4934 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4935
4936 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4937 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4938
4939 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4940 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4941
4942 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4943
4944 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4945
4946 REG_WR(bp, BNX2_HC_COM_TICKS,
4947 (bp->com_ticks_int << 16) | bp->com_ticks);
4948
4949 REG_WR(bp, BNX2_HC_CMD_TICKS,
4950 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4951
4952 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4953 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4954 else
4955 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4956 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4957
4958 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4959 val = BNX2_HC_CONFIG_COLLECT_STATS;
4960 else {
4961 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4962 BNX2_HC_CONFIG_COLLECT_STATS;
4963 }
4964
4965 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4966 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4967 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4968
4969 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4970 }
4971
4972 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4973 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4974
4975 REG_WR(bp, BNX2_HC_CONFIG, val);
4976
4977 for (i = 1; i < bp->irq_nvecs; i++) {
4978 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4979 BNX2_HC_SB_CONFIG_1;
4980
4981 REG_WR(bp, base,
4982 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4983 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4984 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4985
4986 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4987 (bp->tx_quick_cons_trip_int << 16) |
4988 bp->tx_quick_cons_trip);
4989
4990 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4991 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4992
4993 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4994 (bp->rx_quick_cons_trip_int << 16) |
4995 bp->rx_quick_cons_trip);
4996
4997 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4998 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4999 }
5000
5001 /* Clear internal stats counters. */
5002 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5003
5004 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5005
5006 /* Initialize the receive filter. */
5007 bnx2_set_rx_mode(bp->dev);
5008
5009 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5010 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5011 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5012 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5013 }
5014 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5015 1, 0);
5016
5017 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5018 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5019
5020 udelay(20);
5021
5022 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5023
5024 return rc;
5025 }
5026
5027 static void
5028 bnx2_clear_ring_states(struct bnx2 *bp)
5029 {
5030 struct bnx2_napi *bnapi;
5031 struct bnx2_tx_ring_info *txr;
5032 struct bnx2_rx_ring_info *rxr;
5033 int i;
5034
5035 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5036 bnapi = &bp->bnx2_napi[i];
5037 txr = &bnapi->tx_ring;
5038 rxr = &bnapi->rx_ring;
5039
5040 txr->tx_cons = 0;
5041 txr->hw_tx_cons = 0;
5042 rxr->rx_prod_bseq = 0;
5043 rxr->rx_prod = 0;
5044 rxr->rx_cons = 0;
5045 rxr->rx_pg_prod = 0;
5046 rxr->rx_pg_cons = 0;
5047 }
5048 }
5049
5050 static void
5051 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5052 {
5053 u32 val, offset0, offset1, offset2, offset3;
5054 u32 cid_addr = GET_CID_ADDR(cid);
5055
5056 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5057 offset0 = BNX2_L2CTX_TYPE_XI;
5058 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5059 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5060 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5061 } else {
5062 offset0 = BNX2_L2CTX_TYPE;
5063 offset1 = BNX2_L2CTX_CMD_TYPE;
5064 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5065 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5066 }
5067 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5068 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5069
5070 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5071 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5072
5073 val = (u64) txr->tx_desc_mapping >> 32;
5074 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5075
5076 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5077 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5078 }
5079
5080 static void
5081 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5082 {
5083 struct tx_bd *txbd;
5084 u32 cid = TX_CID;
5085 struct bnx2_napi *bnapi;
5086 struct bnx2_tx_ring_info *txr;
5087
5088 bnapi = &bp->bnx2_napi[ring_num];
5089 txr = &bnapi->tx_ring;
5090
5091 if (ring_num == 0)
5092 cid = TX_CID;
5093 else
5094 cid = TX_TSS_CID + ring_num - 1;
5095
5096 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5097
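/* The last BD in the ring is used as a chain BD that points back to
 * the start of the same ring, making the ring circular in hardware.
 */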
5098 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5099
5100 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5101 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5102
5103 txr->tx_prod = 0;
5104 txr->tx_prod_bseq = 0;
5105
5106 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5107 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5108
5109 bnx2_init_tx_context(bp, cid, txr);
5110 }
5111
5112 static void
5113 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5114 int num_rings)
5115 {
5116 int i;
5117 struct rx_bd *rxbd;
5118
5119 for (i = 0; i < num_rings; i++) {
5120 int j;
5121
5122 rxbd = &rx_ring[i][0];
5123 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5124 rxbd->rx_bd_len = buf_size;
5125 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5126 }
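/* The last BD of each ring page chains to the next page, wrapping
 * back to the first page after the last one.
 */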
5127 if (i == (num_rings - 1))
5128 j = 0;
5129 else
5130 j = i + 1;
5131 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5132 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5133 }
5134 }
5135
5136 static void
5137 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5138 {
5139 int i;
5140 u16 prod, ring_prod;
5141 u32 cid, rx_cid_addr, val;
5142 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5143 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5144
5145 if (ring_num == 0)
5146 cid = RX_CID;
5147 else
5148 cid = RX_RSS_CID + ring_num - 1;
5149
5150 rx_cid_addr = GET_CID_ADDR(cid);
5151
5152 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5153 bp->rx_buf_use_size, bp->rx_max_ring);
5154
5155 bnx2_init_rx_context(bp, cid);
5156
5157 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5158 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5159 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5160 }
5161
5162 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5163 if (bp->rx_pg_ring_size) {
5164 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5165 rxr->rx_pg_desc_mapping,
5166 PAGE_SIZE, bp->rx_max_pg_ring);
5167 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5168 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5169 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5170 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5171
5172 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5173 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5174
5175 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5176 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5177
5178 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5179 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5180 }
5181
5182 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5183 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5184
5185 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5186 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5187
5188 ring_prod = prod = rxr->rx_pg_prod;
5189 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5190 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5191 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5192 ring_num, i, bp->rx_pg_ring_size);
5193 break;
5194 }
5195 prod = NEXT_RX_BD(prod);
5196 ring_prod = RX_PG_RING_IDX(prod);
5197 }
5198 rxr->rx_pg_prod = prod;
5199
5200 ring_prod = prod = rxr->rx_prod;
5201 for (i = 0; i < bp->rx_ring_size; i++) {
5202 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5203 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5204 ring_num, i, bp->rx_ring_size);
5205 break;
5206 }
5207 prod = NEXT_RX_BD(prod);
5208 ring_prod = RX_RING_IDX(prod);
5209 }
5210 rxr->rx_prod = prod;
5211
5212 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5213 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5214 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5215
5216 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5217 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5218
5219 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5220 }
5221
5222 static void
5223 bnx2_init_all_rings(struct bnx2 *bp)
5224 {
5225 int i;
5226 u32 val;
5227
5228 bnx2_clear_ring_states(bp);
5229
5230 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5231 for (i = 0; i < bp->num_tx_rings; i++)
5232 bnx2_init_tx_ring(bp, i);
5233
5234 if (bp->num_tx_rings > 1)
5235 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5236 (TX_TSS_CID << 7));
5237
5238 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5239 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5240
5241 for (i = 0; i < bp->num_rx_rings; i++)
5242 bnx2_init_rx_ring(bp, i);
5243
5244 if (bp->num_rx_rings > 1) {
5245 u32 tbl_32;
5246 u8 *tbl = (u8 *) &tbl_32;
5247
5248 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5249 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5250
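/* Build the RSS indirection table four entries at a time; each
 * 32-bit scratch write carries four consecutive table bytes.
 */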
5251 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5252 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5253 if ((i % 4) == 3)
5254 bnx2_reg_wr_ind(bp,
5255 BNX2_RXP_SCRATCH_RSS_TBL + i,
5256 cpu_to_be32(tbl_32));
5257 }
5258
5259 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5260 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5261
5262 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5263
5264 }
5265 }
5266
5267 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5268 {
5269 u32 max, num_rings = 1;
5270
5271 while (ring_size > MAX_RX_DESC_CNT) {
5272 ring_size -= MAX_RX_DESC_CNT;
5273 num_rings++;
5274 }
5275 /* round to next power of 2 */
5276 max = max_size;
5277 while ((max & num_rings) == 0)
5278 max >>= 1;
5279
5280 if (num_rings != max)
5281 max <<= 1;
5282
5283 return max;
5284 }
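/*
 * Example: a ring size that needs three descriptor pages is rounded up
 * by bnx2_find_max_ring() to four rings, the next power of two.
 */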
5285
5286 static void
5287 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5288 {
5289 u32 rx_size, rx_space, jumbo_size;
5290
5291 /* 8 for CRC and VLAN */
5292 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5293
5294 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5295 sizeof(struct skb_shared_info);
5296
5297 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5298 bp->rx_pg_ring_size = 0;
5299 bp->rx_max_pg_ring = 0;
5300 bp->rx_max_pg_ring_idx = 0;
5301 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5302 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5303
5304 jumbo_size = size * pages;
5305 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5306 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5307
5308 bp->rx_pg_ring_size = jumbo_size;
5309 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5310 MAX_RX_PG_RINGS);
5311 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5312 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5313 bp->rx_copy_thresh = 0;
5314 }
5315
5316 bp->rx_buf_use_size = rx_size;
5317 /* hw alignment */
5318 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5319 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5320 bp->rx_ring_size = size;
5321 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5322 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5323 }
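/*
 * Buffer sizing above: rx_size is MTU + ETH_HLEN + BNX2_RX_OFFSET plus
 * 8 bytes for the CRC and VLAN tag.  When that no longer fits in a
 * single page (and jumbo support is not broken on the chip), only the
 * headers are kept in the skb and the rest of the frame is placed in
 * the page ring sized here.
 */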
5324
5325 static void
5326 bnx2_free_tx_skbs(struct bnx2 *bp)
5327 {
5328 int i;
5329
5330 for (i = 0; i < bp->num_tx_rings; i++) {
5331 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5332 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5333 int j;
5334
5335 if (txr->tx_buf_ring == NULL)
5336 continue;
5337
5338 for (j = 0; j < TX_DESC_CNT; ) {
5339 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5340 struct sk_buff *skb = tx_buf->skb;
5341 int k, last;
5342
5343 if (skb == NULL) {
5344 j++;
5345 continue;
5346 }
5347
5348 dma_unmap_single(&bp->pdev->dev,
5349 dma_unmap_addr(tx_buf, mapping),
5350 skb_headlen(skb),
5351 PCI_DMA_TODEVICE);
5352
5353 tx_buf->skb = NULL;
5354
5355 last = tx_buf->nr_frags;
5356 j++;
5357 for (k = 0; k < last; k++, j++) {
5358 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5359 dma_unmap_page(&bp->pdev->dev,
5360 dma_unmap_addr(tx_buf, mapping),
5361 skb_shinfo(skb)->frags[k].size,
5362 PCI_DMA_TODEVICE);
5363 }
5364 dev_kfree_skb(skb);
5365 }
5366 }
5367 }
5368
5369 static void
5370 bnx2_free_rx_skbs(struct bnx2 *bp)
5371 {
5372 int i;
5373
5374 for (i = 0; i < bp->num_rx_rings; i++) {
5375 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5376 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5377 int j;
5378
5379 if (rxr->rx_buf_ring == NULL)
5380 return;
5381
5382 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5383 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5384 struct sk_buff *skb = rx_buf->skb;
5385
5386 if (skb == NULL)
5387 continue;
5388
5389 dma_unmap_single(&bp->pdev->dev,
5390 dma_unmap_addr(rx_buf, mapping),
5391 bp->rx_buf_use_size,
5392 PCI_DMA_FROMDEVICE);
5393
5394 rx_buf->skb = NULL;
5395
5396 dev_kfree_skb(skb);
5397 }
5398 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5399 bnx2_free_rx_page(bp, rxr, j);
5400 }
5401 }
5402
5403 static void
5404 bnx2_free_skbs(struct bnx2 *bp)
5405 {
5406 bnx2_free_tx_skbs(bp);
5407 bnx2_free_rx_skbs(bp);
5408 }
5409
5410 static int
5411 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5412 {
5413 int rc;
5414
5415 rc = bnx2_reset_chip(bp, reset_code);
5416 bnx2_free_skbs(bp);
5417 if (rc)
5418 return rc;
5419
5420 if ((rc = bnx2_init_chip(bp)) != 0)
5421 return rc;
5422
5423 bnx2_init_all_rings(bp);
5424 return 0;
5425 }
5426
5427 static int
5428 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5429 {
5430 int rc;
5431
5432 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5433 return rc;
5434
5435 spin_lock_bh(&bp->phy_lock);
5436 bnx2_init_phy(bp, reset_phy);
5437 bnx2_set_link(bp);
5438 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5439 bnx2_remote_phy_event(bp);
5440 spin_unlock_bh(&bp->phy_lock);
5441 return 0;
5442 }
5443
5444 static int
5445 bnx2_shutdown_chip(struct bnx2 *bp)
5446 {
5447 u32 reset_code;
5448
5449 if (bp->flags & BNX2_FLAG_NO_WOL)
5450 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5451 else if (bp->wol)
5452 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5453 else
5454 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5455
5456 return bnx2_reset_chip(bp, reset_code);
5457 }
5458
5459 static int
5460 bnx2_test_registers(struct bnx2 *bp)
5461 {
5462 int ret;
5463 int i, is_5709;
5464 static const struct {
5465 u16 offset;
5466 u16 flags;
5467 #define BNX2_FL_NOT_5709 1
5468 u32 rw_mask;
5469 u32 ro_mask;
5470 } reg_tbl[] = {
5471 { 0x006c, 0, 0x00000000, 0x0000003f },
5472 { 0x0090, 0, 0xffffffff, 0x00000000 },
5473 { 0x0094, 0, 0x00000000, 0x00000000 },
5474
5475 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5476 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5477 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5478 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5479 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5480 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5481 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5482 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5483 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5484
5485 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5486 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5487 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5488 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5489 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5490 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5491
5492 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5493 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5494 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5495
5496 { 0x1000, 0, 0x00000000, 0x00000001 },
5497 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5498
5499 { 0x1408, 0, 0x01c00800, 0x00000000 },
5500 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5501 { 0x14a8, 0, 0x00000000, 0x000001ff },
5502 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5503 { 0x14b0, 0, 0x00000002, 0x00000001 },
5504 { 0x14b8, 0, 0x00000000, 0x00000000 },
5505 { 0x14c0, 0, 0x00000000, 0x00000009 },
5506 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5507 { 0x14cc, 0, 0x00000000, 0x00000001 },
5508 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5509
5510 { 0x1800, 0, 0x00000000, 0x00000001 },
5511 { 0x1804, 0, 0x00000000, 0x00000003 },
5512
5513 { 0x2800, 0, 0x00000000, 0x00000001 },
5514 { 0x2804, 0, 0x00000000, 0x00003f01 },
5515 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5516 { 0x2810, 0, 0xffff0000, 0x00000000 },
5517 { 0x2814, 0, 0xffff0000, 0x00000000 },
5518 { 0x2818, 0, 0xffff0000, 0x00000000 },
5519 { 0x281c, 0, 0xffff0000, 0x00000000 },
5520 { 0x2834, 0, 0xffffffff, 0x00000000 },
5521 { 0x2840, 0, 0x00000000, 0xffffffff },
5522 { 0x2844, 0, 0x00000000, 0xffffffff },
5523 { 0x2848, 0, 0xffffffff, 0x00000000 },
5524 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5525
5526 { 0x2c00, 0, 0x00000000, 0x00000011 },
5527 { 0x2c04, 0, 0x00000000, 0x00030007 },
5528
5529 { 0x3c00, 0, 0x00000000, 0x00000001 },
5530 { 0x3c04, 0, 0x00000000, 0x00070000 },
5531 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5532 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5533 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5534 { 0x3c14, 0, 0x00000000, 0xffffffff },
5535 { 0x3c18, 0, 0x00000000, 0xffffffff },
5536 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5537 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5538
5539 { 0x5004, 0, 0x00000000, 0x0000007f },
5540 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5541
5542 { 0x5c00, 0, 0x00000000, 0x00000001 },
5543 { 0x5c04, 0, 0x00000000, 0x0003000f },
5544 { 0x5c08, 0, 0x00000003, 0x00000000 },
5545 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5546 { 0x5c10, 0, 0x00000000, 0xffffffff },
5547 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5548 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5549 { 0x5c88, 0, 0x00000000, 0x00077373 },
5550 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5551
5552 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5553 { 0x680c, 0, 0xffffffff, 0x00000000 },
5554 { 0x6810, 0, 0xffffffff, 0x00000000 },
5555 { 0x6814, 0, 0xffffffff, 0x00000000 },
5556 { 0x6818, 0, 0xffffffff, 0x00000000 },
5557 { 0x681c, 0, 0xffffffff, 0x00000000 },
5558 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5559 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5560 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5561 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5562 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5563 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5564 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5565 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5566 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5567 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5568 { 0x684c, 0, 0xffffffff, 0x00000000 },
5569 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5570 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5571 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5572 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5573 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5574 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5575
5576 { 0xffff, 0, 0x00000000, 0x00000000 },
5577 };
5578
5579 ret = 0;
5580 is_5709 = 0;
5581 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5582 is_5709 = 1;
5583
5584 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5585 u32 offset, rw_mask, ro_mask, save_val, val;
5586 u16 flags = reg_tbl[i].flags;
5587
5588 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5589 continue;
5590
5591 offset = (u32) reg_tbl[i].offset;
5592 rw_mask = reg_tbl[i].rw_mask;
5593 ro_mask = reg_tbl[i].ro_mask;
5594
5595 save_val = readl(bp->regview + offset);
5596
5597 writel(0, bp->regview + offset);
5598
5599 val = readl(bp->regview + offset);
5600 if ((val & rw_mask) != 0) {
5601 goto reg_test_err;
5602 }
5603
5604 if ((val & ro_mask) != (save_val & ro_mask)) {
5605 goto reg_test_err;
5606 }
5607
5608 writel(0xffffffff, bp->regview + offset);
5609
5610 val = readl(bp->regview + offset);
5611 if ((val & rw_mask) != rw_mask) {
5612 goto reg_test_err;
5613 }
5614
5615 if ((val & ro_mask) != (save_val & ro_mask)) {
5616 goto reg_test_err;
5617 }
5618
5619 writel(save_val, bp->regview + offset);
5620 continue;
5621
5622 reg_test_err:
5623 writel(save_val, bp->regview + offset);
5624 ret = -ENODEV;
5625 break;
5626 }
5627 return ret;
5628 }
5629
5630 static int
5631 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5632 {
5633 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5634 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5635 int i;
5636
5637 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5638 u32 offset;
5639
5640 for (offset = 0; offset < size; offset += 4) {
5641
5642 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5643
5644 if (bnx2_reg_rd_ind(bp, start + offset) !=
5645 test_pattern[i]) {
5646 return -ENODEV;
5647 }
5648 }
5649 }
5650 return 0;
5651 }
5652
5653 static int
5654 bnx2_test_memory(struct bnx2 *bp)
5655 {
5656 int ret = 0;
5657 int i;
5658 static struct mem_entry {
5659 u32 offset;
5660 u32 len;
5661 } mem_tbl_5706[] = {
5662 { 0x60000, 0x4000 },
5663 { 0xa0000, 0x3000 },
5664 { 0xe0000, 0x4000 },
5665 { 0x120000, 0x4000 },
5666 { 0x1a0000, 0x4000 },
5667 { 0x160000, 0x4000 },
5668 { 0xffffffff, 0 },
5669 },
5670 mem_tbl_5709[] = {
5671 { 0x60000, 0x4000 },
5672 { 0xa0000, 0x3000 },
5673 { 0xe0000, 0x4000 },
5674 { 0x120000, 0x4000 },
5675 { 0x1a0000, 0x4000 },
5676 { 0xffffffff, 0 },
5677 };
5678 struct mem_entry *mem_tbl;
5679
5680 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5681 mem_tbl = mem_tbl_5709;
5682 else
5683 mem_tbl = mem_tbl_5706;
5684
5685 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5686 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5687 mem_tbl[i].len)) != 0) {
5688 return ret;
5689 }
5690 }
5691
5692 return ret;
5693 }
5694
5695 #define BNX2_MAC_LOOPBACK 0
5696 #define BNX2_PHY_LOOPBACK 1
5697
5698 static int
5699 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5700 {
5701 unsigned int pkt_size, num_pkts, i;
5702 struct sk_buff *skb, *rx_skb;
5703 unsigned char *packet;
5704 u16 rx_start_idx, rx_idx;
5705 dma_addr_t map;
5706 struct tx_bd *txbd;
5707 struct sw_bd *rx_buf;
5708 struct l2_fhdr *rx_hdr;
5709 int ret = -ENODEV;
5710 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5711 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5712 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5713
5714 tx_napi = bnapi;
5715
5716 txr = &tx_napi->tx_ring;
5717 rxr = &bnapi->rx_ring;
5718 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5719 bp->loopback = MAC_LOOPBACK;
5720 bnx2_set_mac_loopback(bp);
5721 }
5722 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5723 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5724 return 0;
5725
5726 bp->loopback = PHY_LOOPBACK;
5727 bnx2_set_phy_loopback(bp);
5728 }
5729 else
5730 return -EINVAL;
5731
5732 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5733 skb = netdev_alloc_skb(bp->dev, pkt_size);
5734 if (!skb)
5735 return -ENOMEM;
5736 packet = skb_put(skb, pkt_size);
5737 memcpy(packet, bp->dev->dev_addr, 6);
5738 memset(packet + 6, 0x0, 8);
5739 for (i = 14; i < pkt_size; i++)
5740 packet[i] = (unsigned char) (i & 0xff);
5741
5742 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5743 PCI_DMA_TODEVICE);
5744 if (dma_mapping_error(&bp->pdev->dev, map)) {
5745 dev_kfree_skb(skb);
5746 return -EIO;
5747 }
5748
5749 REG_WR(bp, BNX2_HC_COMMAND,
5750 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5751
5752 REG_RD(bp, BNX2_HC_COMMAND);
5753
5754 udelay(5);
5755 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5756
5757 num_pkts = 0;
5758
5759 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5760
5761 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5762 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5763 txbd->tx_bd_mss_nbytes = pkt_size;
5764 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5765
5766 num_pkts++;
5767 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5768 txr->tx_prod_bseq += pkt_size;
5769
5770 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5771 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5772
5773 udelay(100);
5774
5775 REG_WR(bp, BNX2_HC_COMMAND,
5776 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5777
5778 REG_RD(bp, BNX2_HC_COMMAND);
5779
5780 udelay(5);
5781
5782 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5783 dev_kfree_skb(skb);
5784
5785 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5786 goto loopback_test_done;
5787
5788 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5789 if (rx_idx != rx_start_idx + num_pkts) {
5790 goto loopback_test_done;
5791 }
5792
5793 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5794 rx_skb = rx_buf->skb;
5795
5796 rx_hdr = rx_buf->desc;
5797 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5798
5799 dma_sync_single_for_cpu(&bp->pdev->dev,
5800 dma_unmap_addr(rx_buf, mapping),
5801 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5802
5803 if (rx_hdr->l2_fhdr_status &
5804 (L2_FHDR_ERRORS_BAD_CRC |
5805 L2_FHDR_ERRORS_PHY_DECODE |
5806 L2_FHDR_ERRORS_ALIGNMENT |
5807 L2_FHDR_ERRORS_TOO_SHORT |
5808 L2_FHDR_ERRORS_GIANT_FRAME)) {
5809
5810 goto loopback_test_done;
5811 }
5812
5813 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5814 goto loopback_test_done;
5815 }
5816
5817 for (i = 14; i < pkt_size; i++) {
5818 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5819 goto loopback_test_done;
5820 }
5821 }
5822
5823 ret = 0;
5824
5825 loopback_test_done:
5826 bp->loopback = 0;
5827 return ret;
5828 }
5829
5830 #define BNX2_MAC_LOOPBACK_FAILED 1
5831 #define BNX2_PHY_LOOPBACK_FAILED 2
5832 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5833 BNX2_PHY_LOOPBACK_FAILED)
5834
5835 static int
5836 bnx2_test_loopback(struct bnx2 *bp)
5837 {
5838 int rc = 0;
5839
5840 if (!netif_running(bp->dev))
5841 return BNX2_LOOPBACK_FAILED;
5842
5843 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5844 spin_lock_bh(&bp->phy_lock);
5845 bnx2_init_phy(bp, 1);
5846 spin_unlock_bh(&bp->phy_lock);
5847 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5848 rc |= BNX2_MAC_LOOPBACK_FAILED;
5849 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5850 rc |= BNX2_PHY_LOOPBACK_FAILED;
5851 return rc;
5852 }
5853
5854 #define NVRAM_SIZE 0x200
5855 #define CRC32_RESIDUAL 0xdebb20e3
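/*
 * bnx2_test_nvram() below relies on the CRC32 residual property: running
 * ether_crc_le() over a block that already includes its trailing CRC
 * yields the constant CRC32_RESIDUAL when the data is intact.
 */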
5856
5857 static int
5858 bnx2_test_nvram(struct bnx2 *bp)
5859 {
5860 __be32 buf[NVRAM_SIZE / 4];
5861 u8 *data = (u8 *) buf;
5862 int rc = 0;
5863 u32 magic, csum;
5864
5865 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5866 goto test_nvram_done;
5867
5868 magic = be32_to_cpu(buf[0]);
5869 if (magic != 0x669955aa) {
5870 rc = -ENODEV;
5871 goto test_nvram_done;
5872 }
5873
5874 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5875 goto test_nvram_done;
5876
5877 csum = ether_crc_le(0x100, data);
5878 if (csum != CRC32_RESIDUAL) {
5879 rc = -ENODEV;
5880 goto test_nvram_done;
5881 }
5882
5883 csum = ether_crc_le(0x100, data + 0x100);
5884 if (csum != CRC32_RESIDUAL) {
5885 rc = -ENODEV;
5886 }
5887
5888 test_nvram_done:
5889 return rc;
5890 }
5891
5892 static int
5893 bnx2_test_link(struct bnx2 *bp)
5894 {
5895 u32 bmsr;
5896
5897 if (!netif_running(bp->dev))
5898 return -ENODEV;
5899
5900 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5901 if (bp->link_up)
5902 return 0;
5903 return -ENODEV;
5904 }
5905 spin_lock_bh(&bp->phy_lock);
5906 bnx2_enable_bmsr1(bp);
5907 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5908 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5909 bnx2_disable_bmsr1(bp);
5910 spin_unlock_bh(&bp->phy_lock);
5911
5912 if (bmsr & BMSR_LSTATUS) {
5913 return 0;
5914 }
5915 return -ENODEV;
5916 }
5917
5918 static int
5919 bnx2_test_intr(struct bnx2 *bp)
5920 {
5921 int i;
5922 u16 status_idx;
5923
5924 if (!netif_running(bp->dev))
5925 return -ENODEV;
5926
5927 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5928
5929 /* This register is not touched during run-time. */
5930 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5931 REG_RD(bp, BNX2_HC_COMMAND);
5932
5933 for (i = 0; i < 10; i++) {
5934 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5935 status_idx) {
5936
5937 break;
5938 }
5939
5940 msleep_interruptible(10);
5941 }
5942 if (i < 10)
5943 return 0;
5944
5945 return -ENODEV;
5946 }
5947
5948 /* Determine link for parallel detection. */
5949 static int
5950 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5951 {
5952 u32 mode_ctl, an_dbg, exp;
5953
5954 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5955 return 0;
5956
5957 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5958 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5959
5960 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5961 return 0;
5962
5963 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5964 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5965 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5966
5967 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5968 return 0;
5969
5970 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5971 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5972 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5973
5974 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5975 return 0;
5976
5977 return 1;
5978 }
5979
5980 static void
5981 bnx2_5706_serdes_timer(struct bnx2 *bp)
5982 {
5983 int check_link = 1;
5984
5985 spin_lock(&bp->phy_lock);
5986 if (bp->serdes_an_pending) {
5987 bp->serdes_an_pending--;
5988 check_link = 0;
5989 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5990 u32 bmcr;
5991
5992 bp->current_interval = BNX2_TIMER_INTERVAL;
5993
5994 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5995
5996 if (bmcr & BMCR_ANENABLE) {
5997 if (bnx2_5706_serdes_has_link(bp)) {
5998 bmcr &= ~BMCR_ANENABLE;
5999 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6000 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6001 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6002 }
6003 }
6004 }
6005 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6006 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6007 u32 phy2;
6008
6009 bnx2_write_phy(bp, 0x17, 0x0f01);
6010 bnx2_read_phy(bp, 0x15, &phy2);
6011 if (phy2 & 0x20) {
6012 u32 bmcr;
6013
6014 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6015 bmcr |= BMCR_ANENABLE;
6016 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6017
6018 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6019 }
6020 } else
6021 bp->current_interval = BNX2_TIMER_INTERVAL;
6022
6023 if (check_link) {
6024 u32 val;
6025
6026 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6027 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6028 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6029
6030 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6031 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6032 bnx2_5706s_force_link_dn(bp, 1);
6033 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6034 } else
6035 bnx2_set_link(bp);
6036 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6037 bnx2_set_link(bp);
6038 }
6039 spin_unlock(&bp->phy_lock);
6040 }
6041
6042 static void
6043 bnx2_5708_serdes_timer(struct bnx2 *bp)
6044 {
6045 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6046 return;
6047
6048 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6049 bp->serdes_an_pending = 0;
6050 return;
6051 }
6052
6053 spin_lock(&bp->phy_lock);
6054 if (bp->serdes_an_pending)
6055 bp->serdes_an_pending--;
6056 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6057 u32 bmcr;
6058
6059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6060 if (bmcr & BMCR_ANENABLE) {
6061 bnx2_enable_forced_2g5(bp);
6062 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6063 } else {
6064 bnx2_disable_forced_2g5(bp);
6065 bp->serdes_an_pending = 2;
6066 bp->current_interval = BNX2_TIMER_INTERVAL;
6067 }
6068
6069 } else
6070 bp->current_interval = BNX2_TIMER_INTERVAL;
6071
6072 spin_unlock(&bp->phy_lock);
6073 }
6074
6075 static void
6076 bnx2_timer(unsigned long data)
6077 {
6078 struct bnx2 *bp = (struct bnx2 *) data;
6079
6080 if (!netif_running(bp->dev))
6081 return;
6082
6083 if (atomic_read(&bp->intr_sem) != 0)
6084 goto bnx2_restart_timer;
6085
6086 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6087 BNX2_FLAG_USING_MSI)
6088 bnx2_chk_missed_msi(bp);
6089
6090 bnx2_send_heart_beat(bp);
6091
6092 bp->stats_blk->stat_FwRxDrop =
6093 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6094
6095 /* Work around occasionally corrupted counters. */
6096 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6097 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6098 BNX2_HC_COMMAND_STATS_NOW);
6099
6100 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6101 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6102 bnx2_5706_serdes_timer(bp);
6103 else
6104 bnx2_5708_serdes_timer(bp);
6105 }
6106
6107 bnx2_restart_timer:
6108 mod_timer(&bp->timer, jiffies + bp->current_interval);
6109 }
6110
6111 static int
6112 bnx2_request_irq(struct bnx2 *bp)
6113 {
6114 unsigned long flags;
6115 struct bnx2_irq *irq;
6116 int rc = 0, i;
6117
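/* Legacy INTx may be shared with other devices; MSI and MSI-X vectors
 * are exclusive to this device, so IRQF_SHARED is not needed. */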
6118 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6119 flags = 0;
6120 else
6121 flags = IRQF_SHARED;
6122
6123 for (i = 0; i < bp->irq_nvecs; i++) {
6124 irq = &bp->irq_tbl[i];
6125 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6126 &bp->bnx2_napi[i]);
6127 if (rc)
6128 break;
6129 irq->requested = 1;
6130 }
6131 return rc;
6132 }
6133
6134 static void
6135 bnx2_free_irq(struct bnx2 *bp)
6136 {
6137 struct bnx2_irq *irq;
6138 int i;
6139
6140 for (i = 0; i < bp->irq_nvecs; i++) {
6141 irq = &bp->irq_tbl[i];
6142 if (irq->requested)
6143 free_irq(irq->vector, &bp->bnx2_napi[i]);
6144 irq->requested = 0;
6145 }
6146 if (bp->flags & BNX2_FLAG_USING_MSI)
6147 pci_disable_msi(bp->pdev);
6148 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6149 pci_disable_msix(bp->pdev);
6150
6151 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6152 }
6153
6154 static void
6155 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6156 {
6157 int i, total_vecs, rc;
6158 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6159 struct net_device *dev = bp->dev;
6160 const int len = sizeof(bp->irq_tbl[0].name);
6161
6162 bnx2_setup_msix_tbl(bp);
6163 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6164 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6165 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6166
6167 	/* Need to flush the previous three writes to ensure MSI-X
6168 	 * is set up properly */
6169 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6170
6171 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6172 msix_ent[i].entry = i;
6173 msix_ent[i].vector = 0;
6174 }
6175
6176 total_vecs = msix_vecs;
6177 #ifdef BCM_CNIC
6178 total_vecs++;
6179 #endif
6180 rc = -ENOSPC;
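/* pci_enable_msix() returns a positive count when fewer vectors are
 * available than requested; retry with the reduced count until it
 * succeeds, fails outright, or drops below the minimum. */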
6181 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6182 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6183 if (rc <= 0)
6184 break;
6185 if (rc > 0)
6186 total_vecs = rc;
6187 }
6188
6189 if (rc != 0)
6190 return;
6191
6192 msix_vecs = total_vecs;
6193 #ifdef BCM_CNIC
6194 msix_vecs--;
6195 #endif
6196 bp->irq_nvecs = msix_vecs;
6197 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6198 for (i = 0; i < total_vecs; i++) {
6199 bp->irq_tbl[i].vector = msix_ent[i].vector;
6200 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6201 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6202 }
6203 }
6204
6205 static void
6206 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6207 {
6208 int cpus = num_online_cpus();
6209 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6210
6211 bp->irq_tbl[0].handler = bnx2_interrupt;
6212 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6213 bp->irq_nvecs = 1;
6214 bp->irq_tbl[0].vector = bp->pdev->irq;
6215
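/* The defaults above select legacy INTx; try to upgrade to MSI-X,
 * then MSI, when the hardware supports it and it is not disabled. */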
6216 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6217 bnx2_enable_msix(bp, msix_vecs);
6218
6219 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6220 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6221 if (pci_enable_msi(bp->pdev) == 0) {
6222 bp->flags |= BNX2_FLAG_USING_MSI;
6223 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6224 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6225 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6226 } else
6227 bp->irq_tbl[0].handler = bnx2_msi;
6228
6229 bp->irq_tbl[0].vector = bp->pdev->irq;
6230 }
6231 }
6232
6233 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6234 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6235
6236 bp->num_rx_rings = bp->irq_nvecs;
6237 }
6238
6239 /* Called with rtnl_lock */
6240 static int
6241 bnx2_open(struct net_device *dev)
6242 {
6243 struct bnx2 *bp = netdev_priv(dev);
6244 int rc;
6245
6246 netif_carrier_off(dev);
6247
6248 bnx2_set_power_state(bp, PCI_D0);
6249 bnx2_disable_int(bp);
6250
6251 bnx2_setup_int_mode(bp, disable_msi);
6252 bnx2_init_napi(bp);
6253 bnx2_napi_enable(bp);
6254 rc = bnx2_alloc_mem(bp);
6255 if (rc)
6256 goto open_err;
6257
6258 rc = bnx2_request_irq(bp);
6259 if (rc)
6260 goto open_err;
6261
6262 rc = bnx2_init_nic(bp, 1);
6263 if (rc)
6264 goto open_err;
6265
6266 mod_timer(&bp->timer, jiffies + bp->current_interval);
6267
6268 atomic_set(&bp->intr_sem, 0);
6269
6270 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6271
6272 bnx2_enable_int(bp);
6273
6274 if (bp->flags & BNX2_FLAG_USING_MSI) {
6275 		/* Test MSI to make sure it is working.
6276 		 * If the MSI test fails, go back to INTx mode.
6277 		 */
6278 if (bnx2_test_intr(bp) != 0) {
6279 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6280
6281 bnx2_disable_int(bp);
6282 bnx2_free_irq(bp);
6283
6284 bnx2_setup_int_mode(bp, 1);
6285
6286 rc = bnx2_init_nic(bp, 0);
6287
6288 if (!rc)
6289 rc = bnx2_request_irq(bp);
6290
6291 if (rc) {
6292 del_timer_sync(&bp->timer);
6293 goto open_err;
6294 }
6295 bnx2_enable_int(bp);
6296 }
6297 }
6298 if (bp->flags & BNX2_FLAG_USING_MSI)
6299 netdev_info(dev, "using MSI\n");
6300 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6301 netdev_info(dev, "using MSIX\n");
6302
6303 netif_tx_start_all_queues(dev);
6304
6305 return 0;
6306
6307 open_err:
6308 bnx2_napi_disable(bp);
6309 bnx2_free_skbs(bp);
6310 bnx2_free_irq(bp);
6311 bnx2_free_mem(bp);
6312 bnx2_del_napi(bp);
6313 return rc;
6314 }
6315
6316 static void
6317 bnx2_reset_task(struct work_struct *work)
6318 {
6319 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6320
6321 rtnl_lock();
6322 if (!netif_running(bp->dev)) {
6323 rtnl_unlock();
6324 return;
6325 }
6326
6327 bnx2_netif_stop(bp, true);
6328
6329 bnx2_init_nic(bp, 1);
6330
6331 atomic_set(&bp->intr_sem, 1);
6332 bnx2_netif_start(bp, true);
6333 rtnl_unlock();
6334 }
6335
6336 static void
6337 bnx2_dump_state(struct bnx2 *bp)
6338 {
6339 struct net_device *dev = bp->dev;
6340 u32 mcp_p0, mcp_p1, val1, val2;
6341
6342 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6343 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6344 atomic_read(&bp->intr_sem), val1);
6345 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6346 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6347 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6348 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6349 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6350 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6351 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6352 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6353 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6354 mcp_p0 = BNX2_MCP_STATE_P0;
6355 mcp_p1 = BNX2_MCP_STATE_P1;
6356 } else {
6357 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6358 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6359 }
6360 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6361 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6362 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6363 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6364 if (bp->flags & BNX2_FLAG_USING_MSIX)
6365 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6366 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6367 }
6368
6369 static void
6370 bnx2_tx_timeout(struct net_device *dev)
6371 {
6372 struct bnx2 *bp = netdev_priv(dev);
6373
6374 bnx2_dump_state(bp);
6375
6376 	/* This allows the netif to be shut down gracefully before resetting */
6377 schedule_work(&bp->reset_task);
6378 }
6379
6380 #ifdef BCM_VLAN
6381 /* Called with rtnl_lock */
6382 static void
6383 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6384 {
6385 struct bnx2 *bp = netdev_priv(dev);
6386
6387 if (netif_running(dev))
6388 bnx2_netif_stop(bp, false);
6389
6390 bp->vlgrp = vlgrp;
6391
6392 if (!netif_running(dev))
6393 return;
6394
6395 bnx2_set_rx_mode(dev);
6396 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6397 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6398
6399 bnx2_netif_start(bp, false);
6400 }
6401 #endif
6402
6403 /* Called with netif_tx_lock.
6404 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6405 * netif_wake_queue().
6406 */
6407 static netdev_tx_t
6408 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6409 {
6410 struct bnx2 *bp = netdev_priv(dev);
6411 dma_addr_t mapping;
6412 struct tx_bd *txbd;
6413 struct sw_tx_bd *tx_buf;
6414 u32 len, vlan_tag_flags, last_frag, mss;
6415 u16 prod, ring_prod;
6416 int i;
6417 struct bnx2_napi *bnapi;
6418 struct bnx2_tx_ring_info *txr;
6419 struct netdev_queue *txq;
6420
6421 /* Determine which tx ring we will be placed on */
6422 i = skb_get_queue_mapping(skb);
6423 bnapi = &bp->bnx2_napi[i];
6424 txr = &bnapi->tx_ring;
6425 txq = netdev_get_tx_queue(dev, i);
6426
6427 if (unlikely(bnx2_tx_avail(bp, txr) <
6428 (skb_shinfo(skb)->nr_frags + 1))) {
6429 netif_tx_stop_queue(txq);
6430 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6431
6432 return NETDEV_TX_BUSY;
6433 }
6434 len = skb_headlen(skb);
6435 prod = txr->tx_prod;
6436 ring_prod = TX_RING_IDX(prod);
6437
6438 vlan_tag_flags = 0;
6439 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6440 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6441 }
6442
6443 #ifdef BCM_VLAN
6444 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6445 vlan_tag_flags |=
6446 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6447 }
6448 #endif
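/* For GSO packets, set the hardware LSO flag and encode the extra
 * IP/TCP header length (and, for IPv6, the TCP header offset) into
 * the BD flags and MSS fields. */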
6449 if ((mss = skb_shinfo(skb)->gso_size)) {
6450 u32 tcp_opt_len;
6451 struct iphdr *iph;
6452
6453 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6454
6455 tcp_opt_len = tcp_optlen(skb);
6456
6457 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6458 u32 tcp_off = skb_transport_offset(skb) -
6459 sizeof(struct ipv6hdr) - ETH_HLEN;
6460
6461 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6462 TX_BD_FLAGS_SW_FLAGS;
6463 if (likely(tcp_off == 0))
6464 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6465 else {
6466 tcp_off >>= 3;
6467 vlan_tag_flags |= ((tcp_off & 0x3) <<
6468 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6469 ((tcp_off & 0x10) <<
6470 TX_BD_FLAGS_TCP6_OFF4_SHL);
6471 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6472 }
6473 } else {
6474 iph = ip_hdr(skb);
6475 if (tcp_opt_len || (iph->ihl > 5)) {
6476 vlan_tag_flags |= ((iph->ihl - 5) +
6477 (tcp_opt_len >> 2)) << 8;
6478 }
6479 }
6480 } else
6481 mss = 0;
6482
6483 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6484 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6485 dev_kfree_skb(skb);
6486 return NETDEV_TX_OK;
6487 }
6488
6489 tx_buf = &txr->tx_buf_ring[ring_prod];
6490 tx_buf->skb = skb;
6491 dma_unmap_addr_set(tx_buf, mapping, mapping);
6492
6493 txbd = &txr->tx_desc_ring[ring_prod];
6494
6495 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6496 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6497 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6498 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6499
6500 last_frag = skb_shinfo(skb)->nr_frags;
6501 tx_buf->nr_frags = last_frag;
6502 tx_buf->is_gso = skb_is_gso(skb);
6503
6504 for (i = 0; i < last_frag; i++) {
6505 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6506
6507 prod = NEXT_TX_BD(prod);
6508 ring_prod = TX_RING_IDX(prod);
6509 txbd = &txr->tx_desc_ring[ring_prod];
6510
6511 len = frag->size;
6512 mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6513 len, PCI_DMA_TODEVICE);
6514 if (dma_mapping_error(&bp->pdev->dev, mapping))
6515 goto dma_error;
6516 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6517 mapping);
6518
6519 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6520 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6521 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6522 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6523
6524 }
6525 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6526
6527 prod = NEXT_TX_BD(prod);
6528 txr->tx_prod_bseq += skb->len;
6529
6530 REG_WR16(bp, txr->tx_bidx_addr, prod);
6531 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6532
6533 mmiowb();
6534
6535 txr->tx_prod = prod;
6536
6537 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6538 netif_tx_stop_queue(txq);
6539
6540 /* netif_tx_stop_queue() must be done before checking
6541 * tx index in bnx2_tx_avail() below, because in
6542 * bnx2_tx_int(), we update tx index before checking for
6543 * netif_tx_queue_stopped().
6544 */
6545 smp_mb();
6546 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6547 netif_tx_wake_queue(txq);
6548 }
6549
6550 return NETDEV_TX_OK;
6551 dma_error:
6552 	/* save the index of the frag that failed to map */
6553 last_frag = i;
6554
6555 	/* start back at the beginning and unmap the skb */
6556 prod = txr->tx_prod;
6557 ring_prod = TX_RING_IDX(prod);
6558 tx_buf = &txr->tx_buf_ring[ring_prod];
6559 tx_buf->skb = NULL;
6560 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6561 skb_headlen(skb), PCI_DMA_TODEVICE);
6562
6563 /* unmap remaining mapped pages */
6564 for (i = 0; i < last_frag; i++) {
6565 prod = NEXT_TX_BD(prod);
6566 ring_prod = TX_RING_IDX(prod);
6567 tx_buf = &txr->tx_buf_ring[ring_prod];
6568 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6569 skb_shinfo(skb)->frags[i].size,
6570 PCI_DMA_TODEVICE);
6571 }
6572
6573 dev_kfree_skb(skb);
6574 return NETDEV_TX_OK;
6575 }
6576
6577 /* Called with rtnl_lock */
6578 static int
6579 bnx2_close(struct net_device *dev)
6580 {
6581 struct bnx2 *bp = netdev_priv(dev);
6582
6583 cancel_work_sync(&bp->reset_task);
6584
6585 bnx2_disable_int_sync(bp);
6586 bnx2_napi_disable(bp);
6587 del_timer_sync(&bp->timer);
6588 bnx2_shutdown_chip(bp);
6589 bnx2_free_irq(bp);
6590 bnx2_free_skbs(bp);
6591 bnx2_free_mem(bp);
6592 bnx2_del_napi(bp);
6593 bp->link_up = 0;
6594 netif_carrier_off(bp->dev);
6595 bnx2_set_power_state(bp, PCI_D3hot);
6596 return 0;
6597 }
6598
6599 static void
6600 bnx2_save_stats(struct bnx2 *bp)
6601 {
6602 u32 *hw_stats = (u32 *) bp->stats_blk;
6603 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6604 int i;
6605
6606 /* The 1st 10 counters are 64-bit counters */
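/* Each 64-bit counter is a hi/lo pair of 32-bit words: add the pairs
 * and carry any overflow from the low word into the high word. */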
6607 for (i = 0; i < 20; i += 2) {
6608 u32 hi;
6609 u64 lo;
6610
6611 hi = temp_stats[i] + hw_stats[i];
6612 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6613 if (lo > 0xffffffff)
6614 hi++;
6615 temp_stats[i] = hi;
6616 temp_stats[i + 1] = lo & 0xffffffff;
6617 }
6618
6619 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6620 temp_stats[i] += hw_stats[i];
6621 }
6622
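/* These helpers fold the live hardware counters together with
 * temp_stats_blk, which accumulates the counts saved across chip resets. */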
6623 #define GET_64BIT_NET_STATS64(ctr) \
6624 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6625
6626 #define GET_64BIT_NET_STATS(ctr) \
6627 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6628 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6629
6630 #define GET_32BIT_NET_STATS(ctr) \
6631 (unsigned long) (bp->stats_blk->ctr + \
6632 bp->temp_stats_blk->ctr)
6633
6634 static struct rtnl_link_stats64 *
6635 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6636 {
6637 struct bnx2 *bp = netdev_priv(dev);
6638
6639 if (bp->stats_blk == NULL)
6640 return net_stats;
6641
6642 net_stats->rx_packets =
6643 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6644 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6645 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6646
6647 net_stats->tx_packets =
6648 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6649 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6650 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6651
6652 net_stats->rx_bytes =
6653 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6654
6655 net_stats->tx_bytes =
6656 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6657
6658 net_stats->multicast =
6659 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6660
6661 net_stats->collisions =
6662 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6663
6664 net_stats->rx_length_errors =
6665 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6666 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6667
6668 net_stats->rx_over_errors =
6669 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6670 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6671
6672 net_stats->rx_frame_errors =
6673 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6674
6675 net_stats->rx_crc_errors =
6676 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6677
6678 net_stats->rx_errors = net_stats->rx_length_errors +
6679 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6680 net_stats->rx_crc_errors;
6681
6682 net_stats->tx_aborted_errors =
6683 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6684 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6685
6686 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6687 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6688 net_stats->tx_carrier_errors = 0;
6689 else {
6690 net_stats->tx_carrier_errors =
6691 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6692 }
6693
6694 net_stats->tx_errors =
6695 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6696 net_stats->tx_aborted_errors +
6697 net_stats->tx_carrier_errors;
6698
6699 net_stats->rx_missed_errors =
6700 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6701 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6702 GET_32BIT_NET_STATS(stat_FwRxDrop);
6703
6704 return net_stats;
6705 }
6706
6707 /* All ethtool functions called with rtnl_lock */
6708
6709 static int
6710 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6711 {
6712 struct bnx2 *bp = netdev_priv(dev);
6713 int support_serdes = 0, support_copper = 0;
6714
6715 cmd->supported = SUPPORTED_Autoneg;
6716 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6717 support_serdes = 1;
6718 support_copper = 1;
6719 } else if (bp->phy_port == PORT_FIBRE)
6720 support_serdes = 1;
6721 else
6722 support_copper = 1;
6723
6724 if (support_serdes) {
6725 cmd->supported |= SUPPORTED_1000baseT_Full |
6726 SUPPORTED_FIBRE;
6727 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6728 cmd->supported |= SUPPORTED_2500baseX_Full;
6729
6730 }
6731 if (support_copper) {
6732 cmd->supported |= SUPPORTED_10baseT_Half |
6733 SUPPORTED_10baseT_Full |
6734 SUPPORTED_100baseT_Half |
6735 SUPPORTED_100baseT_Full |
6736 SUPPORTED_1000baseT_Full |
6737 SUPPORTED_TP;
6738
6739 }
6740
6741 spin_lock_bh(&bp->phy_lock);
6742 cmd->port = bp->phy_port;
6743 cmd->advertising = bp->advertising;
6744
6745 if (bp->autoneg & AUTONEG_SPEED) {
6746 cmd->autoneg = AUTONEG_ENABLE;
6747 }
6748 else {
6749 cmd->autoneg = AUTONEG_DISABLE;
6750 }
6751
6752 if (netif_carrier_ok(dev)) {
6753 cmd->speed = bp->line_speed;
6754 cmd->duplex = bp->duplex;
6755 }
6756 else {
6757 cmd->speed = -1;
6758 cmd->duplex = -1;
6759 }
6760 spin_unlock_bh(&bp->phy_lock);
6761
6762 cmd->transceiver = XCVR_INTERNAL;
6763 cmd->phy_address = bp->phy_addr;
6764
6765 return 0;
6766 }
6767
6768 static int
6769 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6770 {
6771 struct bnx2 *bp = netdev_priv(dev);
6772 u8 autoneg = bp->autoneg;
6773 u8 req_duplex = bp->req_duplex;
6774 u16 req_line_speed = bp->req_line_speed;
6775 u32 advertising = bp->advertising;
6776 int err = -EINVAL;
6777
6778 spin_lock_bh(&bp->phy_lock);
6779
6780 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6781 goto err_out_unlock;
6782
6783 if (cmd->port != bp->phy_port &&
6784 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6785 goto err_out_unlock;
6786
6787 /* If device is down, we can store the settings only if the user
6788 * is setting the currently active port.
6789 */
6790 if (!netif_running(dev) && cmd->port != bp->phy_port)
6791 goto err_out_unlock;
6792
6793 if (cmd->autoneg == AUTONEG_ENABLE) {
6794 autoneg |= AUTONEG_SPEED;
6795
6796 advertising = cmd->advertising;
6797 if (cmd->port == PORT_TP) {
6798 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6799 if (!advertising)
6800 advertising = ETHTOOL_ALL_COPPER_SPEED;
6801 } else {
6802 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6803 if (!advertising)
6804 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6805 }
6806 advertising |= ADVERTISED_Autoneg;
6807 }
6808 else {
6809 if (cmd->port == PORT_FIBRE) {
6810 if ((cmd->speed != SPEED_1000 &&
6811 cmd->speed != SPEED_2500) ||
6812 (cmd->duplex != DUPLEX_FULL))
6813 goto err_out_unlock;
6814
6815 if (cmd->speed == SPEED_2500 &&
6816 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6817 goto err_out_unlock;
6818 }
6819 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6820 goto err_out_unlock;
6821
6822 autoneg &= ~AUTONEG_SPEED;
6823 req_line_speed = cmd->speed;
6824 req_duplex = cmd->duplex;
6825 advertising = 0;
6826 }
6827
6828 bp->autoneg = autoneg;
6829 bp->advertising = advertising;
6830 bp->req_line_speed = req_line_speed;
6831 bp->req_duplex = req_duplex;
6832
6833 err = 0;
6834 /* If device is down, the new settings will be picked up when it is
6835 * brought up.
6836 */
6837 if (netif_running(dev))
6838 err = bnx2_setup_phy(bp, cmd->port);
6839
6840 err_out_unlock:
6841 spin_unlock_bh(&bp->phy_lock);
6842
6843 return err;
6844 }
6845
6846 static void
6847 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6848 {
6849 struct bnx2 *bp = netdev_priv(dev);
6850
6851 strcpy(info->driver, DRV_MODULE_NAME);
6852 strcpy(info->version, DRV_MODULE_VERSION);
6853 strcpy(info->bus_info, pci_name(bp->pdev));
6854 strcpy(info->fw_version, bp->fw_version);
6855 }
6856
6857 #define BNX2_REGDUMP_LEN (32 * 1024)
6858
6859 static int
6860 bnx2_get_regs_len(struct net_device *dev)
6861 {
6862 return BNX2_REGDUMP_LEN;
6863 }
6864
6865 static void
6866 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6867 {
6868 u32 *p = _p, i, offset;
6869 u8 *orig_p = _p;
6870 struct bnx2 *bp = netdev_priv(dev);
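/* Alternating start/end offsets of the readable register blocks;
 * the gaps between blocks are skipped and left zeroed in the dump. */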
6871 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6872 0x0800, 0x0880, 0x0c00, 0x0c10,
6873 0x0c30, 0x0d08, 0x1000, 0x101c,
6874 0x1040, 0x1048, 0x1080, 0x10a4,
6875 0x1400, 0x1490, 0x1498, 0x14f0,
6876 0x1500, 0x155c, 0x1580, 0x15dc,
6877 0x1600, 0x1658, 0x1680, 0x16d8,
6878 0x1800, 0x1820, 0x1840, 0x1854,
6879 0x1880, 0x1894, 0x1900, 0x1984,
6880 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6881 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6882 0x2000, 0x2030, 0x23c0, 0x2400,
6883 0x2800, 0x2820, 0x2830, 0x2850,
6884 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6885 0x3c00, 0x3c94, 0x4000, 0x4010,
6886 0x4080, 0x4090, 0x43c0, 0x4458,
6887 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6888 0x4fc0, 0x5010, 0x53c0, 0x5444,
6889 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6890 0x5fc0, 0x6000, 0x6400, 0x6428,
6891 0x6800, 0x6848, 0x684c, 0x6860,
6892 0x6888, 0x6910, 0x8000 };
6893
6894 regs->version = 0;
6895
6896 memset(p, 0, BNX2_REGDUMP_LEN);
6897
6898 if (!netif_running(bp->dev))
6899 return;
6900
6901 i = 0;
6902 offset = reg_boundaries[0];
6903 p += offset;
6904 while (offset < BNX2_REGDUMP_LEN) {
6905 *p++ = REG_RD(bp, offset);
6906 offset += 4;
6907 if (offset == reg_boundaries[i + 1]) {
6908 offset = reg_boundaries[i + 2];
6909 p = (u32 *) (orig_p + offset);
6910 i += 2;
6911 }
6912 }
6913 }
6914
6915 static void
6916 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6917 {
6918 struct bnx2 *bp = netdev_priv(dev);
6919
6920 if (bp->flags & BNX2_FLAG_NO_WOL) {
6921 wol->supported = 0;
6922 wol->wolopts = 0;
6923 }
6924 else {
6925 wol->supported = WAKE_MAGIC;
6926 if (bp->wol)
6927 wol->wolopts = WAKE_MAGIC;
6928 else
6929 wol->wolopts = 0;
6930 }
6931 memset(&wol->sopass, 0, sizeof(wol->sopass));
6932 }
6933
6934 static int
6935 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6936 {
6937 struct bnx2 *bp = netdev_priv(dev);
6938
6939 if (wol->wolopts & ~WAKE_MAGIC)
6940 return -EINVAL;
6941
6942 if (wol->wolopts & WAKE_MAGIC) {
6943 if (bp->flags & BNX2_FLAG_NO_WOL)
6944 return -EINVAL;
6945
6946 bp->wol = 1;
6947 }
6948 else {
6949 bp->wol = 0;
6950 }
6951 return 0;
6952 }
6953
6954 static int
6955 bnx2_nway_reset(struct net_device *dev)
6956 {
6957 struct bnx2 *bp = netdev_priv(dev);
6958 u32 bmcr;
6959
6960 if (!netif_running(dev))
6961 return -EAGAIN;
6962
6963 if (!(bp->autoneg & AUTONEG_SPEED)) {
6964 return -EINVAL;
6965 }
6966
6967 spin_lock_bh(&bp->phy_lock);
6968
6969 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6970 int rc;
6971
6972 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6973 spin_unlock_bh(&bp->phy_lock);
6974 return rc;
6975 }
6976
6977 /* Force a link down visible on the other side */
6978 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6979 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6980 spin_unlock_bh(&bp->phy_lock);
6981
6982 msleep(20);
6983
6984 spin_lock_bh(&bp->phy_lock);
6985
6986 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6987 bp->serdes_an_pending = 1;
6988 mod_timer(&bp->timer, jiffies + bp->current_interval);
6989 }
6990
6991 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6992 bmcr &= ~BMCR_LOOPBACK;
6993 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6994
6995 spin_unlock_bh(&bp->phy_lock);
6996
6997 return 0;
6998 }
6999
7000 static u32
7001 bnx2_get_link(struct net_device *dev)
7002 {
7003 struct bnx2 *bp = netdev_priv(dev);
7004
7005 return bp->link_up;
7006 }
7007
7008 static int
7009 bnx2_get_eeprom_len(struct net_device *dev)
7010 {
7011 struct bnx2 *bp = netdev_priv(dev);
7012
7013 if (bp->flash_info == NULL)
7014 return 0;
7015
7016 return (int) bp->flash_size;
7017 }
7018
7019 static int
7020 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7021 u8 *eebuf)
7022 {
7023 struct bnx2 *bp = netdev_priv(dev);
7024 int rc;
7025
7026 if (!netif_running(dev))
7027 return -EAGAIN;
7028
7029 /* parameters already validated in ethtool_get_eeprom */
7030
7031 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7032
7033 return rc;
7034 }
7035
7036 static int
7037 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7038 u8 *eebuf)
7039 {
7040 struct bnx2 *bp = netdev_priv(dev);
7041 int rc;
7042
7043 if (!netif_running(dev))
7044 return -EAGAIN;
7045
7046 /* parameters already validated in ethtool_set_eeprom */
7047
7048 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7049
7050 return rc;
7051 }
7052
7053 static int
7054 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7055 {
7056 struct bnx2 *bp = netdev_priv(dev);
7057
7058 memset(coal, 0, sizeof(struct ethtool_coalesce));
7059
7060 coal->rx_coalesce_usecs = bp->rx_ticks;
7061 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7062 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7063 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7064
7065 coal->tx_coalesce_usecs = bp->tx_ticks;
7066 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7067 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7068 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7069
7070 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7071
7072 return 0;
7073 }
7074
7075 static int
7076 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7077 {
7078 struct bnx2 *bp = netdev_priv(dev);
7079
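/* Clamp tick values to 10 bits and frame-count trip values to 8 bits,
 * matching the width of the host coalescing register fields. */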
7080 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7081 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7082
7083 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7084 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7085
7086 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7087 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7088
7089 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7090 if (bp->rx_quick_cons_trip_int > 0xff)
7091 bp->rx_quick_cons_trip_int = 0xff;
7092
7093 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7094 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7095
7096 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7097 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7098
7099 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7100 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7101
7102 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7103 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7104 0xff;
7105
7106 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7107 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7108 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7109 bp->stats_ticks = USEC_PER_SEC;
7110 }
7111 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7112 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7113 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7114
7115 if (netif_running(bp->dev)) {
7116 bnx2_netif_stop(bp, true);
7117 bnx2_init_nic(bp, 0);
7118 bnx2_netif_start(bp, true);
7119 }
7120
7121 return 0;
7122 }
7123
7124 static void
7125 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7126 {
7127 struct bnx2 *bp = netdev_priv(dev);
7128
7129 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7130 ering->rx_mini_max_pending = 0;
7131 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7132
7133 ering->rx_pending = bp->rx_ring_size;
7134 ering->rx_mini_pending = 0;
7135 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7136
7137 ering->tx_max_pending = MAX_TX_DESC_CNT;
7138 ering->tx_pending = bp->tx_ring_size;
7139 }
7140
7141 static int
7142 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7143 {
7144 if (netif_running(bp->dev)) {
7145 /* Reset will erase chipset stats; save them */
7146 bnx2_save_stats(bp);
7147
7148 bnx2_netif_stop(bp, true);
7149 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7150 bnx2_free_skbs(bp);
7151 bnx2_free_mem(bp);
7152 }
7153
7154 bnx2_set_rx_ring_size(bp, rx);
7155 bp->tx_ring_size = tx;
7156
7157 if (netif_running(bp->dev)) {
7158 int rc;
7159
7160 rc = bnx2_alloc_mem(bp);
7161 if (!rc)
7162 rc = bnx2_init_nic(bp, 0);
7163
7164 if (rc) {
7165 bnx2_napi_enable(bp);
7166 dev_close(bp->dev);
7167 return rc;
7168 }
7169 #ifdef BCM_CNIC
7170 mutex_lock(&bp->cnic_lock);
7171 /* Let cnic know about the new status block. */
7172 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7173 bnx2_setup_cnic_irq_info(bp);
7174 mutex_unlock(&bp->cnic_lock);
7175 #endif
7176 bnx2_netif_start(bp, true);
7177 }
7178 return 0;
7179 }
7180
7181 static int
7182 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7183 {
7184 struct bnx2 *bp = netdev_priv(dev);
7185 int rc;
7186
7187 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7188 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7189 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7190
7191 return -EINVAL;
7192 }
7193 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7194 return rc;
7195 }
7196
7197 static void
7198 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7199 {
7200 struct bnx2 *bp = netdev_priv(dev);
7201
7202 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7203 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7204 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7205 }
7206
7207 static int
7208 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7209 {
7210 struct bnx2 *bp = netdev_priv(dev);
7211
7212 bp->req_flow_ctrl = 0;
7213 if (epause->rx_pause)
7214 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7215 if (epause->tx_pause)
7216 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7217
7218 if (epause->autoneg) {
7219 bp->autoneg |= AUTONEG_FLOW_CTRL;
7220 }
7221 else {
7222 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7223 }
7224
7225 if (netif_running(dev)) {
7226 spin_lock_bh(&bp->phy_lock);
7227 bnx2_setup_phy(bp, bp->phy_port);
7228 spin_unlock_bh(&bp->phy_lock);
7229 }
7230
7231 return 0;
7232 }
7233
7234 static u32
7235 bnx2_get_rx_csum(struct net_device *dev)
7236 {
7237 struct bnx2 *bp = netdev_priv(dev);
7238
7239 return bp->rx_csum;
7240 }
7241
7242 static int
7243 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7244 {
7245 struct bnx2 *bp = netdev_priv(dev);
7246
7247 bp->rx_csum = data;
7248 return 0;
7249 }
7250
7251 static int
7252 bnx2_set_tso(struct net_device *dev, u32 data)
7253 {
7254 struct bnx2 *bp = netdev_priv(dev);
7255
7256 if (data) {
7257 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7258 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7259 dev->features |= NETIF_F_TSO6;
7260 } else
7261 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7262 NETIF_F_TSO_ECN);
7263 return 0;
7264 }
7265
7266 static struct {
7267 char string[ETH_GSTRING_LEN];
7268 } bnx2_stats_str_arr[] = {
7269 { "rx_bytes" },
7270 { "rx_error_bytes" },
7271 { "tx_bytes" },
7272 { "tx_error_bytes" },
7273 { "rx_ucast_packets" },
7274 { "rx_mcast_packets" },
7275 { "rx_bcast_packets" },
7276 { "tx_ucast_packets" },
7277 { "tx_mcast_packets" },
7278 { "tx_bcast_packets" },
7279 { "tx_mac_errors" },
7280 { "tx_carrier_errors" },
7281 { "rx_crc_errors" },
7282 { "rx_align_errors" },
7283 { "tx_single_collisions" },
7284 { "tx_multi_collisions" },
7285 { "tx_deferred" },
7286 { "tx_excess_collisions" },
7287 { "tx_late_collisions" },
7288 { "tx_total_collisions" },
7289 { "rx_fragments" },
7290 { "rx_jabbers" },
7291 { "rx_undersize_packets" },
7292 { "rx_oversize_packets" },
7293 { "rx_64_byte_packets" },
7294 { "rx_65_to_127_byte_packets" },
7295 { "rx_128_to_255_byte_packets" },
7296 { "rx_256_to_511_byte_packets" },
7297 { "rx_512_to_1023_byte_packets" },
7298 { "rx_1024_to_1522_byte_packets" },
7299 { "rx_1523_to_9022_byte_packets" },
7300 { "tx_64_byte_packets" },
7301 { "tx_65_to_127_byte_packets" },
7302 { "tx_128_to_255_byte_packets" },
7303 { "tx_256_to_511_byte_packets" },
7304 { "tx_512_to_1023_byte_packets" },
7305 { "tx_1024_to_1522_byte_packets" },
7306 { "tx_1523_to_9022_byte_packets" },
7307 { "rx_xon_frames" },
7308 { "rx_xoff_frames" },
7309 { "tx_xon_frames" },
7310 { "tx_xoff_frames" },
7311 { "rx_mac_ctrl_frames" },
7312 { "rx_filtered_packets" },
7313 { "rx_ftq_discards" },
7314 { "rx_discards" },
7315 { "rx_fw_discards" },
7316 };
7317
7318 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7319 sizeof(bnx2_stats_str_arr[0]))
7320
7321 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7322
7323 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7324 STATS_OFFSET32(stat_IfHCInOctets_hi),
7325 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7326 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7327 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7328 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7329 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7330 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7331 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7332 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7333 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7334 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7335 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7336 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7337 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7338 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7339 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7340 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7341 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7342 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7343 STATS_OFFSET32(stat_EtherStatsCollisions),
7344 STATS_OFFSET32(stat_EtherStatsFragments),
7345 STATS_OFFSET32(stat_EtherStatsJabbers),
7346 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7347 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7348 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7349 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7350 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7351 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7352 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7353 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7354 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7355 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7356 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7357 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7358 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7359 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7360 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7361 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7362 STATS_OFFSET32(stat_XonPauseFramesReceived),
7363 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7364 STATS_OFFSET32(stat_OutXonSent),
7365 STATS_OFFSET32(stat_OutXoffSent),
7366 STATS_OFFSET32(stat_MacControlFramesReceived),
7367 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7368 STATS_OFFSET32(stat_IfInFTQDiscards),
7369 STATS_OFFSET32(stat_IfInMBUFDiscards),
7370 STATS_OFFSET32(stat_FwRxDrop),
7371 };
7372
7373 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7374 * skipped because of errata.
7375 */
7376 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7377 8,0,8,8,8,8,8,8,8,8,
7378 4,0,4,4,4,4,4,4,4,4,
7379 4,4,4,4,4,4,4,4,4,4,
7380 4,4,4,4,4,4,4,4,4,4,
7381 4,4,4,4,4,4,4,
7382 };
7383
7384 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7385 8,0,8,8,8,8,8,8,8,8,
7386 4,4,4,4,4,4,4,4,4,4,
7387 4,4,4,4,4,4,4,4,4,4,
7388 4,4,4,4,4,4,4,4,4,4,
7389 4,4,4,4,4,4,4,
7390 };
7391
7392 #define BNX2_NUM_TESTS 6
7393
7394 static struct {
7395 char string[ETH_GSTRING_LEN];
7396 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7397 { "register_test (offline)" },
7398 { "memory_test (offline)" },
7399 { "loopback_test (offline)" },
7400 { "nvram_test (online)" },
7401 { "interrupt_test (online)" },
7402 { "link_test (online)" },
7403 };
7404
7405 static int
7406 bnx2_get_sset_count(struct net_device *dev, int sset)
7407 {
7408 switch (sset) {
7409 case ETH_SS_TEST:
7410 return BNX2_NUM_TESTS;
7411 case ETH_SS_STATS:
7412 return BNX2_NUM_STATS;
7413 default:
7414 return -EOPNOTSUPP;
7415 }
7416 }
7417
7418 static void
7419 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7420 {
7421 struct bnx2 *bp = netdev_priv(dev);
7422
7423 bnx2_set_power_state(bp, PCI_D0);
7424
7425 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7426 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7427 int i;
7428
7429 bnx2_netif_stop(bp, true);
7430 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7431 bnx2_free_skbs(bp);
7432
7433 if (bnx2_test_registers(bp) != 0) {
7434 buf[0] = 1;
7435 etest->flags |= ETH_TEST_FL_FAILED;
7436 }
7437 if (bnx2_test_memory(bp) != 0) {
7438 buf[1] = 1;
7439 etest->flags |= ETH_TEST_FL_FAILED;
7440 }
7441 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7442 etest->flags |= ETH_TEST_FL_FAILED;
7443
7444 if (!netif_running(bp->dev))
7445 bnx2_shutdown_chip(bp);
7446 else {
7447 bnx2_init_nic(bp, 1);
7448 bnx2_netif_start(bp, true);
7449 }
7450
7451 /* wait for link up */
7452 for (i = 0; i < 7; i++) {
7453 if (bp->link_up)
7454 break;
7455 msleep_interruptible(1000);
7456 }
7457 }
7458
7459 if (bnx2_test_nvram(bp) != 0) {
7460 buf[3] = 1;
7461 etest->flags |= ETH_TEST_FL_FAILED;
7462 }
7463 if (bnx2_test_intr(bp) != 0) {
7464 buf[4] = 1;
7465 etest->flags |= ETH_TEST_FL_FAILED;
7466 }
7467
7468 if (bnx2_test_link(bp) != 0) {
7469 buf[5] = 1;
7470 etest->flags |= ETH_TEST_FL_FAILED;
7471
7472 }
7473 if (!netif_running(bp->dev))
7474 bnx2_set_power_state(bp, PCI_D3hot);
7475 }
7476
7477 static void
7478 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7479 {
7480 switch (stringset) {
7481 case ETH_SS_STATS:
7482 memcpy(buf, bnx2_stats_str_arr,
7483 sizeof(bnx2_stats_str_arr));
7484 break;
7485 case ETH_SS_TEST:
7486 memcpy(buf, bnx2_tests_str_arr,
7487 sizeof(bnx2_tests_str_arr));
7488 break;
7489 }
7490 }
7491
7492 static void
7493 bnx2_get_ethtool_stats(struct net_device *dev,
7494 struct ethtool_stats *stats, u64 *buf)
7495 {
7496 struct bnx2 *bp = netdev_priv(dev);
7497 int i;
7498 u32 *hw_stats = (u32 *) bp->stats_blk;
7499 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7500 u8 *stats_len_arr = NULL;
7501
7502 if (hw_stats == NULL) {
7503 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7504 return;
7505 }
7506
7507 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7508 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7509 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7510 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7511 stats_len_arr = bnx2_5706_stats_len_arr;
7512 else
7513 stats_len_arr = bnx2_5708_stats_len_arr;
7514
7515 for (i = 0; i < BNX2_NUM_STATS; i++) {
7516 unsigned long offset;
7517
7518 if (stats_len_arr[i] == 0) {
7519 /* skip this counter */
7520 buf[i] = 0;
7521 continue;
7522 }
7523
7524 offset = bnx2_stats_offset_arr[i];
7525 if (stats_len_arr[i] == 4) {
7526 /* 4-byte counter */
7527 buf[i] = (u64) *(hw_stats + offset) +
7528 *(temp_stats + offset);
7529 continue;
7530 }
7531 /* 8-byte counter */
7532 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7533 *(hw_stats + offset + 1) +
7534 (((u64) *(temp_stats + offset)) << 32) +
7535 *(temp_stats + offset + 1);
7536 }
7537 }
7538
7539 static int
7540 bnx2_phys_id(struct net_device *dev, u32 data)
7541 {
7542 struct bnx2 *bp = netdev_priv(dev);
7543 int i;
7544 u32 save;
7545
7546 bnx2_set_power_state(bp, PCI_D0);
7547
7548 if (data == 0)
7549 data = 2;
7550
7551 save = REG_RD(bp, BNX2_MISC_CFG);
7552 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7553
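/* Blink the port LED by toggling the EMAC LED override bits,
 * 500 ms per state, for 'data' on/off cycles. */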
7554 for (i = 0; i < (data * 2); i++) {
7555 if ((i % 2) == 0) {
7556 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7557 }
7558 else {
7559 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7560 BNX2_EMAC_LED_1000MB_OVERRIDE |
7561 BNX2_EMAC_LED_100MB_OVERRIDE |
7562 BNX2_EMAC_LED_10MB_OVERRIDE |
7563 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7564 BNX2_EMAC_LED_TRAFFIC);
7565 }
7566 msleep_interruptible(500);
7567 if (signal_pending(current))
7568 break;
7569 }
7570 REG_WR(bp, BNX2_EMAC_LED, 0);
7571 REG_WR(bp, BNX2_MISC_CFG, save);
7572
7573 if (!netif_running(dev))
7574 bnx2_set_power_state(bp, PCI_D3hot);
7575
7576 return 0;
7577 }
7578
7579 static int
7580 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7581 {
7582 struct bnx2 *bp = netdev_priv(dev);
7583
7584 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7585 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7586 else
7587 return (ethtool_op_set_tx_csum(dev, data));
7588 }
7589
7590 static int
7591 bnx2_set_flags(struct net_device *dev, u32 data)
7592 {
7593 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7594 }
7595
7596 static const struct ethtool_ops bnx2_ethtool_ops = {
7597 .get_settings = bnx2_get_settings,
7598 .set_settings = bnx2_set_settings,
7599 .get_drvinfo = bnx2_get_drvinfo,
7600 .get_regs_len = bnx2_get_regs_len,
7601 .get_regs = bnx2_get_regs,
7602 .get_wol = bnx2_get_wol,
7603 .set_wol = bnx2_set_wol,
7604 .nway_reset = bnx2_nway_reset,
7605 .get_link = bnx2_get_link,
7606 .get_eeprom_len = bnx2_get_eeprom_len,
7607 .get_eeprom = bnx2_get_eeprom,
7608 .set_eeprom = bnx2_set_eeprom,
7609 .get_coalesce = bnx2_get_coalesce,
7610 .set_coalesce = bnx2_set_coalesce,
7611 .get_ringparam = bnx2_get_ringparam,
7612 .set_ringparam = bnx2_set_ringparam,
7613 .get_pauseparam = bnx2_get_pauseparam,
7614 .set_pauseparam = bnx2_set_pauseparam,
7615 .get_rx_csum = bnx2_get_rx_csum,
7616 .set_rx_csum = bnx2_set_rx_csum,
7617 .set_tx_csum = bnx2_set_tx_csum,
7618 .set_sg = ethtool_op_set_sg,
7619 .set_tso = bnx2_set_tso,
7620 .self_test = bnx2_self_test,
7621 .get_strings = bnx2_get_strings,
7622 .phys_id = bnx2_phys_id,
7623 .get_ethtool_stats = bnx2_get_ethtool_stats,
7624 .get_sset_count = bnx2_get_sset_count,
7625 .set_flags = bnx2_set_flags,
7626 .get_flags = ethtool_op_get_flags,
7627 };
7628
7629 /* Called with rtnl_lock */
7630 static int
7631 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7632 {
7633 struct mii_ioctl_data *data = if_mii(ifr);
7634 struct bnx2 *bp = netdev_priv(dev);
7635 int err;
7636
7637 switch(cmd) {
7638 case SIOCGMIIPHY:
7639 data->phy_id = bp->phy_addr;
7640
7641 /* fallthru */
7642 case SIOCGMIIREG: {
7643 u32 mii_regval;
7644
7645 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7646 return -EOPNOTSUPP;
7647
7648 if (!netif_running(dev))
7649 return -EAGAIN;
7650
7651 spin_lock_bh(&bp->phy_lock);
7652 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7653 spin_unlock_bh(&bp->phy_lock);
7654
7655 data->val_out = mii_regval;
7656
7657 return err;
7658 }
7659
7660 case SIOCSMIIREG:
7661 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7662 return -EOPNOTSUPP;
7663
7664 if (!netif_running(dev))
7665 return -EAGAIN;
7666
7667 spin_lock_bh(&bp->phy_lock);
7668 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7669 spin_unlock_bh(&bp->phy_lock);
7670
7671 return err;
7672
7673 default:
7674 /* do nothing */
7675 break;
7676 }
7677 return -EOPNOTSUPP;
7678 }
7679
7680 /* Called with rtnl_lock */
7681 static int
7682 bnx2_change_mac_addr(struct net_device *dev, void *p)
7683 {
7684 struct sockaddr *addr = p;
7685 struct bnx2 *bp = netdev_priv(dev);
7686
7687 if (!is_valid_ether_addr(addr->sa_data))
7688 return -EINVAL;
7689
7690 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7691 if (netif_running(dev))
7692 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7693
7694 return 0;
7695 }
7696
7697 /* Called with rtnl_lock */
7698 static int
7699 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7700 {
7701 struct bnx2 *bp = netdev_priv(dev);
7702
7703 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7704 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7705 return -EINVAL;
7706
7707 dev->mtu = new_mtu;
7708 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7709 }
7710
7711 #ifdef CONFIG_NET_POLL_CONTROLLER
7712 static void
7713 poll_bnx2(struct net_device *dev)
7714 {
7715 struct bnx2 *bp = netdev_priv(dev);
7716 int i;
7717
7718 for (i = 0; i < bp->irq_nvecs; i++) {
7719 struct bnx2_irq *irq = &bp->irq_tbl[i];
7720
7721 disable_irq(irq->vector);
7722 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7723 enable_irq(irq->vector);
7724 }
7725 }
7726 #endif
7727
7728 static void __devinit
7729 bnx2_get_5709_media(struct bnx2 *bp)
7730 {
7731 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7732 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7733 u32 strap;
7734
7735 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7736 return;
7737 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7738 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7739 return;
7740 }
7741
7742 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7743 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7744 else
7745 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7746
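/* The strap value maps to SerDes or copper media differently for
 * PCI function 0 and function 1. */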
7747 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7748 switch (strap) {
7749 case 0x4:
7750 case 0x5:
7751 case 0x6:
7752 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7753 return;
7754 }
7755 } else {
7756 switch (strap) {
7757 case 0x1:
7758 case 0x2:
7759 case 0x4:
7760 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7761 return;
7762 }
7763 }
7764 }
7765
7766 static void __devinit
7767 bnx2_get_pci_speed(struct bnx2 *bp)
7768 {
7769 u32 reg;
7770
7771 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7772 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7773 u32 clkreg;
7774
7775 bp->flags |= BNX2_FLAG_PCIX;
7776
7777 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7778
7779 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7780 switch (clkreg) {
7781 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7782 bp->bus_speed_mhz = 133;
7783 break;
7784
7785 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7786 bp->bus_speed_mhz = 100;
7787 break;
7788
7789 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7790 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7791 bp->bus_speed_mhz = 66;
7792 break;
7793
7794 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7795 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7796 bp->bus_speed_mhz = 50;
7797 break;
7798
7799 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7800 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7801 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7802 bp->bus_speed_mhz = 33;
7803 break;
7804 }
7805 }
7806 else {
7807 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7808 bp->bus_speed_mhz = 66;
7809 else
7810 bp->bus_speed_mhz = 33;
7811 }
7812
7813 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7814 bp->flags |= BNX2_FLAG_PCI_32BIT;
7815
7816 }
7817
7818 static void __devinit
7819 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7820 {
7821 int rc, i, j;
7822 u8 *data;
7823 unsigned int block_end, rosize, len;
7824
7825 #define BNX2_VPD_NVRAM_OFFSET 0x300
7826 #define BNX2_VPD_LEN 128
7827 #define BNX2_MAX_VER_SLEN 30
7828
7829 data = kmalloc(256, GFP_KERNEL);
7830 if (!data)
7831 return;
7832
7833 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7834 BNX2_VPD_LEN);
7835 if (rc)
7836 goto vpd_done;
7837
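/* The raw VPD image was read into the upper half of the buffer;
 * byte-swap each 32-bit word into the lower half before parsing. */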
7838 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7839 data[i] = data[i + BNX2_VPD_LEN + 3];
7840 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7841 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7842 data[i + 3] = data[i + BNX2_VPD_LEN];
7843 }
7844
7845 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7846 if (i < 0)
7847 goto vpd_done;
7848
7849 rosize = pci_vpd_lrdt_size(&data[i]);
7850 i += PCI_VPD_LRDT_TAG_SIZE;
7851 block_end = i + rosize;
7852
7853 if (block_end > BNX2_VPD_LEN)
7854 goto vpd_done;
7855
7856 j = pci_vpd_find_info_keyword(data, i, rosize,
7857 PCI_VPD_RO_KEYWORD_MFR_ID);
7858 if (j < 0)
7859 goto vpd_done;
7860
7861 len = pci_vpd_info_field_size(&data[j]);
7862
7863 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7864 if (j + len > block_end || len != 4 ||
7865 memcmp(&data[j], "1028", 4))
7866 goto vpd_done;
7867
7868 j = pci_vpd_find_info_keyword(data, i, rosize,
7869 PCI_VPD_RO_KEYWORD_VENDOR0);
7870 if (j < 0)
7871 goto vpd_done;
7872
7873 len = pci_vpd_info_field_size(&data[j]);
7874
7875 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7876 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7877 goto vpd_done;
7878
7879 memcpy(bp->fw_version, &data[j], len);
7880 bp->fw_version[len] = ' ';
7881
7882 vpd_done:
7883 kfree(data);
7884 }
7885
7886 static int __devinit
7887 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7888 {
7889 struct bnx2 *bp;
7890 unsigned long mem_len;
7891 int rc, i, j;
7892 u32 reg;
7893 u64 dma_mask, persist_dma_mask;
7894 int err;
7895
7896 SET_NETDEV_DEV(dev, &pdev->dev);
7897 bp = netdev_priv(dev);
7898
7899 bp->flags = 0;
7900 bp->phy_flags = 0;
7901
7902 bp->temp_stats_blk =
7903 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7904
7905 if (bp->temp_stats_blk == NULL) {
7906 rc = -ENOMEM;
7907 goto err_out;
7908 }
7909
7910 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7911 rc = pci_enable_device(pdev);
7912 if (rc) {
7913 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7914 goto err_out;
7915 }
7916
7917 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7918 dev_err(&pdev->dev,
7919 "Cannot find PCI device base address, aborting\n");
7920 rc = -ENODEV;
7921 goto err_out_disable;
7922 }
7923
7924 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7925 if (rc) {
7926 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7927 goto err_out_disable;
7928 }
7929
7930 /* AER (Advanced Error Reporting) hooks */
7931 err = pci_enable_pcie_error_reporting(pdev);
7932 if (err) {
7933 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
7934 "0x%x\n", err);
7935 /* non-fatal, continue */
7936 }
7937
7938 pci_set_master(pdev);
7939 pci_save_state(pdev);
7940
7941 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7942 if (bp->pm_cap == 0) {
7943 dev_err(&pdev->dev,
7944 "Cannot find power management capability, aborting\n");
7945 rc = -EIO;
7946 goto err_out_release;
7947 }
7948
7949 bp->dev = dev;
7950 bp->pdev = pdev;
7951
7952 spin_lock_init(&bp->phy_lock);
7953 spin_lock_init(&bp->indirect_lock);
7954 #ifdef BCM_CNIC
7955 mutex_init(&bp->cnic_lock);
7956 #endif
7957 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7958
7959 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7960 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7961 dev->mem_end = dev->mem_start + mem_len;
7962 dev->irq = pdev->irq;
7963
7964 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7965
7966 if (!bp->regview) {
7967 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7968 rc = -ENOMEM;
7969 goto err_out_release;
7970 }
7971
7972 	/* Configure byte swap and enable write to the reg_window registers.
7973 	 * Rely on the CPU to do target byte swapping on big endian systems;
7974 	 * the chip's target access swapping will not swap all accesses.
7975 	 */
7976 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7977 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7978 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7979
7980 bnx2_set_power_state(bp, PCI_D0);
7981
7982 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7983
7984 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7985 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7986 dev_err(&pdev->dev,
7987 "Cannot find PCIE capability, aborting\n");
7988 rc = -EIO;
7989 goto err_out_unmap;
7990 }
7991 bp->flags |= BNX2_FLAG_PCIE;
7992 if (CHIP_REV(bp) == CHIP_REV_Ax)
7993 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7994 } else {
7995 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7996 if (bp->pcix_cap == 0) {
7997 dev_err(&pdev->dev,
7998 "Cannot find PCIX capability, aborting\n");
7999 rc = -EIO;
8000 goto err_out_unmap;
8001 }
8002 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8003 }
8004
8005 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
8006 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8007 bp->flags |= BNX2_FLAG_MSIX_CAP;
8008 }
8009
8010 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8011 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8012 bp->flags |= BNX2_FLAG_MSI_CAP;
8013 }
8014
8015 /* 5708 cannot support DMA addresses > 40-bit. */
8016 if (CHIP_NUM(bp) == CHIP_NUM_5708)
8017 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8018 else
8019 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8020
8021 /* Configure DMA attributes. */
8022 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8023 dev->features |= NETIF_F_HIGHDMA;
8024 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8025 if (rc) {
8026 dev_err(&pdev->dev,
8027 "pci_set_consistent_dma_mask failed, aborting\n");
8028 goto err_out_unmap;
8029 }
8030 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8031 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8032 goto err_out_unmap;
8033 }
8034
8035 if (!(bp->flags & BNX2_FLAG_PCIE))
8036 bnx2_get_pci_speed(bp);
8037
8038 /* 5706A0 may falsely detect SERR and PERR. */
8039 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8040 reg = REG_RD(bp, PCI_COMMAND);
8041 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8042 REG_WR(bp, PCI_COMMAND, reg);
8043 }
8044 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8045 !(bp->flags & BNX2_FLAG_PCIX)) {
8046
8047 dev_err(&pdev->dev,
8048 "5706 A1 can only be used in a PCIX bus, aborting\n");
8049 goto err_out_unmap;
8050 }
8051
8052 bnx2_init_nvram(bp);
8053
8054 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8055
8056 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8057 BNX2_SHM_HDR_SIGNATURE_SIG) {
8058 u32 off = PCI_FUNC(pdev->devfn) << 2;
8059
8060 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8061 } else
8062 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8063
8064 /* Get the permanent MAC address. First we need to make sure the
8065 * firmware is actually running.
8066 */
8067 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8068
8069 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8070 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8071 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8072 rc = -ENODEV;
8073 goto err_out_unmap;
8074 }
8075
8076 bnx2_read_vpd_fw_ver(bp);
8077
8078 j = strlen(bp->fw_version);
8079 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
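/* Append the bootcode revision as "bc x.y.z", one decimal byte per
 * field, suppressing leading zeros. */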
8080 for (i = 0; i < 3 && j < 24; i++) {
8081 u8 num, k, skip0;
8082
8083 if (i == 0) {
8084 bp->fw_version[j++] = 'b';
8085 bp->fw_version[j++] = 'c';
8086 bp->fw_version[j++] = ' ';
8087 }
8088 num = (u8) (reg >> (24 - (i * 8)));
8089 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8090 if (num >= k || !skip0 || k == 1) {
8091 bp->fw_version[j++] = (num / k) + '0';
8092 skip0 = 0;
8093 }
8094 }
8095 if (i != 2)
8096 bp->fw_version[j++] = '.';
8097 }
8098 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8099 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8100 bp->wol = 1;
8101
8102 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8103 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8104
8105 for (i = 0; i < 30; i++) {
8106 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8107 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8108 break;
8109 msleep(10);
8110 }
8111 }
8112 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8113 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8114 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8115 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8116 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8117
8118 if (j < 32)
8119 bp->fw_version[j++] = ' ';
8120 for (i = 0; i < 3 && j < 28; i++) {
8121 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8122 reg = swab32(reg);
8123 memcpy(&bp->fw_version[j], &reg, 4);
8124 j += 4;
8125 }
8126 }
8127
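	/* The permanent MAC address is stored big-endian in two shared
	 * memory words: MAC_UPPER holds bytes 0-1, MAC_LOWER bytes 2-5.
	 */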
8128 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8129 bp->mac_addr[0] = (u8) (reg >> 8);
8130 bp->mac_addr[1] = (u8) reg;
8131
8132 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8133 bp->mac_addr[2] = (u8) (reg >> 24);
8134 bp->mac_addr[3] = (u8) (reg >> 16);
8135 bp->mac_addr[4] = (u8) (reg >> 8);
8136 bp->mac_addr[5] = (u8) reg;
8137
8138 bp->tx_ring_size = MAX_TX_DESC_CNT;
8139 bnx2_set_rx_ring_size(bp, 255);
8140
8141 bp->rx_csum = 1;
8142
8143 bp->tx_quick_cons_trip_int = 2;
8144 bp->tx_quick_cons_trip = 20;
8145 bp->tx_ticks_int = 18;
8146 bp->tx_ticks = 80;
8147
8148 bp->rx_quick_cons_trip_int = 2;
8149 bp->rx_quick_cons_trip = 12;
8150 bp->rx_ticks_int = 18;
8151 bp->rx_ticks = 18;
8152
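	/* Default the statistics coalescing interval to roughly one second:
	 * USEC_PER_SEC masked down to the bits the stats ticks field accepts.
	 */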
8153 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8154
8155 bp->current_interval = BNX2_TIMER_INTERVAL;
8156
8157 bp->phy_addr = 1;
8158
8159 	/* Determine the media type; WOL is disabled below for SERDES chips without GIG_LINK_ON_VAUX. */
8160 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8161 bnx2_get_5709_media(bp);
8162 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8163 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8164
8165 bp->phy_port = PORT_TP;
8166 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8167 bp->phy_port = PORT_FIBRE;
8168 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8169 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8170 bp->flags |= BNX2_FLAG_NO_WOL;
8171 bp->wol = 0;
8172 }
8173 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8174 /* Don't do parallel detect on this board because of
8175 * some board problems. The link will not go down
8176 * if we do parallel detect.
8177 */
8178 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8179 pdev->subsystem_device == 0x310c)
8180 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8181 } else {
8182 bp->phy_addr = 2;
8183 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8184 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8185 }
8186 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8187 CHIP_NUM(bp) == CHIP_NUM_5708)
8188 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8189 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8190 (CHIP_REV(bp) == CHIP_REV_Ax ||
8191 CHIP_REV(bp) == CHIP_REV_Bx))
8192 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8193
8194 bnx2_init_fw_cap(bp);
8195
8196 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8197 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8198 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8199 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8200 bp->flags |= BNX2_FLAG_NO_WOL;
8201 bp->wol = 0;
8202 }
8203
8204 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8205 bp->tx_quick_cons_trip_int =
8206 bp->tx_quick_cons_trip;
8207 bp->tx_ticks_int = bp->tx_ticks;
8208 bp->rx_quick_cons_trip_int =
8209 bp->rx_quick_cons_trip;
8210 bp->rx_ticks_int = bp->rx_ticks;
8211 bp->comp_prod_trip_int = bp->comp_prod_trip;
8212 bp->com_ticks_int = bp->com_ticks;
8213 bp->cmd_ticks_int = bp->cmd_ticks;
8214 }
8215
8216 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8217 *
8218 	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8219 * with byte enables disabled on the unused 32-bit word. This is legal
8220 * but causes problems on the AMD 8132 which will eventually stop
8221 * responding after a while.
8222 *
8223 * AMD believes this incompatibility is unique to the 5706, and
8224 * prefers to locally disable MSI rather than globally disabling it.
8225 */
8226 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8227 struct pci_dev *amd_8132 = NULL;
8228
8229 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8230 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8231 amd_8132))) {
8232
8233 if (amd_8132->revision >= 0x10 &&
8234 amd_8132->revision <= 0x13) {
8235 disable_msi = 1;
8236 pci_dev_put(amd_8132);
8237 break;
8238 }
8239 }
8240 }
8241
8242 bnx2_set_default_link(bp);
8243 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8244
8245 init_timer(&bp->timer);
8246 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8247 bp->timer.data = (unsigned long) bp;
8248 bp->timer.function = bnx2_timer;
8249
8250 return 0;
8251
8252 err_out_unmap:
8253 if (bp->regview) {
8254 iounmap(bp->regview);
8255 bp->regview = NULL;
8256 }
8257
8258 err_out_release:
8259 pci_disable_pcie_error_reporting(pdev);
8260 pci_release_regions(pdev);
8261
8262 err_out_disable:
8263 pci_disable_device(pdev);
8264 pci_set_drvdata(pdev, NULL);
8265
8266 err_out:
8267 return rc;
8268 }
8269
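/* Format the bus type, width and speed (e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz") into str for the probe banner.
 */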
8270 static char * __devinit
8271 bnx2_bus_string(struct bnx2 *bp, char *str)
8272 {
8273 char *s = str;
8274
8275 if (bp->flags & BNX2_FLAG_PCIE) {
8276 s += sprintf(s, "PCI Express");
8277 } else {
8278 s += sprintf(s, "PCI");
8279 if (bp->flags & BNX2_FLAG_PCIX)
8280 s += sprintf(s, "-X");
8281 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8282 s += sprintf(s, " 32-bit");
8283 else
8284 s += sprintf(s, " 64-bit");
8285 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8286 }
8287 return str;
8288 }
8289
8290 static void
8291 bnx2_del_napi(struct bnx2 *bp)
8292 {
8293 int i;
8294
8295 for (i = 0; i < bp->irq_nvecs; i++)
8296 netif_napi_del(&bp->bnx2_napi[i].napi);
8297 }
8298
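/* Register one NAPI context per IRQ vector: vector 0 polls with bnx2_poll,
 * any additional MSI-X vectors use bnx2_poll_msix; all use a weight of 64.
 */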
8299 static void
8300 bnx2_init_napi(struct bnx2 *bp)
8301 {
8302 int i;
8303
8304 for (i = 0; i < bp->irq_nvecs; i++) {
8305 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8306 int (*poll)(struct napi_struct *, int);
8307
8308 if (i == 0)
8309 poll = bnx2_poll;
8310 else
8311 poll = bnx2_poll_msix;
8312
8313 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8314 bnapi->bp = bp;
8315 }
8316 }
8317
8318 static const struct net_device_ops bnx2_netdev_ops = {
8319 .ndo_open = bnx2_open,
8320 .ndo_start_xmit = bnx2_start_xmit,
8321 .ndo_stop = bnx2_close,
8322 .ndo_get_stats64 = bnx2_get_stats64,
8323 .ndo_set_rx_mode = bnx2_set_rx_mode,
8324 .ndo_do_ioctl = bnx2_ioctl,
8325 .ndo_validate_addr = eth_validate_addr,
8326 .ndo_set_mac_address = bnx2_change_mac_addr,
8327 .ndo_change_mtu = bnx2_change_mtu,
8328 .ndo_tx_timeout = bnx2_tx_timeout,
8329 #ifdef BCM_VLAN
8330 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8331 #endif
8332 #ifdef CONFIG_NET_POLL_CONTROLLER
8333 .ndo_poll_controller = poll_bnx2,
8334 #endif
8335 };
8336
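/* Propagate feature flags to dev->vlan_features, but only when VLAN
 * support is compiled in.
 */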
8337 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
8338 {
8339 #ifdef BCM_VLAN
8340 dev->vlan_features |= flags;
8341 #endif
8342 }
8343
8344 static int __devinit
8345 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8346 {
8347 static int version_printed = 0;
8348 struct net_device *dev = NULL;
8349 struct bnx2 *bp;
8350 int rc;
8351 char str[40];
8352
8353 if (version_printed++ == 0)
8354 pr_info("%s", version);
8355
8356 	/* dev and its private area are zeroed by alloc_etherdev_mq() */
8357 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8358
8359 if (!dev)
8360 return -ENOMEM;
8361
8362 rc = bnx2_init_board(pdev, dev);
8363 if (rc < 0) {
8364 free_netdev(dev);
8365 return rc;
8366 }
8367
8368 dev->netdev_ops = &bnx2_netdev_ops;
8369 dev->watchdog_timeo = TX_TIMEOUT;
8370 dev->ethtool_ops = &bnx2_ethtool_ops;
8371
8372 bp = netdev_priv(dev);
8373
8374 pci_set_drvdata(pdev, dev);
8375
8376 rc = bnx2_request_firmware(bp);
8377 if (rc)
8378 goto error;
8379
8380 memcpy(dev->dev_addr, bp->mac_addr, 6);
8381 memcpy(dev->perm_addr, bp->mac_addr, 6);
8382
8383 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8384 NETIF_F_RXHASH;
8385 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8386 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8387 dev->features |= NETIF_F_IPV6_CSUM;
8388 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8389 }
8390 #ifdef BCM_VLAN
8391 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8392 #endif
8393 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8394 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8395 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8396 dev->features |= NETIF_F_TSO6;
8397 vlan_features_add(dev, NETIF_F_TSO6);
8398 }
8399 if ((rc = register_netdev(dev))) {
8400 dev_err(&pdev->dev, "Cannot register net device\n");
8401 goto error;
8402 }
8403
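	/* The chip revision prints as a letter/step pair (e.g. "C0"):
	 * bits 15:12 of CHIP_ID select the letter, bits 11:4 the step.
	 */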
8404 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8405 board_info[ent->driver_data].name,
8406 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8407 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8408 bnx2_bus_string(bp, str),
8409 dev->base_addr,
8410 bp->pdev->irq, dev->dev_addr);
8411
8412 return 0;
8413
8414 error:
8415 if (bp->mips_firmware)
8416 release_firmware(bp->mips_firmware);
8417 if (bp->rv2p_firmware)
8418 release_firmware(bp->rv2p_firmware);
8419
8420 if (bp->regview)
8421 iounmap(bp->regview);
8422 pci_release_regions(pdev);
8423 pci_disable_device(pdev);
8424 pci_set_drvdata(pdev, NULL);
8425 free_netdev(dev);
8426 return rc;
8427 }
8428
8429 static void __devexit
8430 bnx2_remove_one(struct pci_dev *pdev)
8431 {
8432 struct net_device *dev = pci_get_drvdata(pdev);
8433 struct bnx2 *bp = netdev_priv(dev);
8434
8435 flush_scheduled_work();
8436
8437 unregister_netdev(dev);
8438
8439 if (bp->mips_firmware)
8440 release_firmware(bp->mips_firmware);
8441 if (bp->rv2p_firmware)
8442 release_firmware(bp->rv2p_firmware);
8443
8444 if (bp->regview)
8445 iounmap(bp->regview);
8446
8447 kfree(bp->temp_stats_blk);
8448
8449 free_netdev(dev);
8450
8451 pci_disable_pcie_error_reporting(pdev);
8452
8453 pci_release_regions(pdev);
8454 pci_disable_device(pdev);
8455 pci_set_drvdata(pdev, NULL);
8456 }
8457
8458 static int
8459 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8460 {
8461 struct net_device *dev = pci_get_drvdata(pdev);
8462 struct bnx2 *bp = netdev_priv(dev);
8463
8464 /* PCI register 4 needs to be saved whether netif_running() or not.
8465 * MSI address and data need to be saved if using MSI and
8466 * netif_running().
8467 */
8468 pci_save_state(pdev);
8469 if (!netif_running(dev))
8470 return 0;
8471
8472 flush_scheduled_work();
8473 bnx2_netif_stop(bp, true);
8474 netif_device_detach(dev);
8475 del_timer_sync(&bp->timer);
8476 bnx2_shutdown_chip(bp);
8477 bnx2_free_skbs(bp);
8478 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8479 return 0;
8480 }
8481
8482 static int
8483 bnx2_resume(struct pci_dev *pdev)
8484 {
8485 struct net_device *dev = pci_get_drvdata(pdev);
8486 struct bnx2 *bp = netdev_priv(dev);
8487
8488 pci_restore_state(pdev);
8489 if (!netif_running(dev))
8490 return 0;
8491
8492 bnx2_set_power_state(bp, PCI_D0);
8493 netif_device_attach(dev);
8494 bnx2_init_nic(bp, 1);
8495 bnx2_netif_start(bp, true);
8496 return 0;
8497 }
8498
8499 /**
8500 * bnx2_io_error_detected - called when PCI error is detected
8501 * @pdev: Pointer to PCI device
8502 * @state: The current pci connection state
8503 *
8504 * This function is called after a PCI bus error affecting
8505 * this device has been detected.
8506 */
8507 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8508 pci_channel_state_t state)
8509 {
8510 struct net_device *dev = pci_get_drvdata(pdev);
8511 struct bnx2 *bp = netdev_priv(dev);
8512
8513 rtnl_lock();
8514 netif_device_detach(dev);
8515
8516 if (state == pci_channel_io_perm_failure) {
8517 rtnl_unlock();
8518 return PCI_ERS_RESULT_DISCONNECT;
8519 }
8520
8521 if (netif_running(dev)) {
8522 bnx2_netif_stop(bp, true);
8523 del_timer_sync(&bp->timer);
8524 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8525 }
8526
8527 pci_disable_device(pdev);
8528 rtnl_unlock();
8529
8530 	/* Request a slot reset. */
8531 return PCI_ERS_RESULT_NEED_RESET;
8532 }
8533
8534 /**
8535 * bnx2_io_slot_reset - called after the pci bus has been reset.
8536 * @pdev: Pointer to PCI device
8537 *
8538 * Restart the card from scratch, as if from a cold-boot.
8539 */
8540 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8541 {
8542 struct net_device *dev = pci_get_drvdata(pdev);
8543 struct bnx2 *bp = netdev_priv(dev);
8544 pci_ers_result_t result;
8545 int err;
8546
8547 rtnl_lock();
8548 if (pci_enable_device(pdev)) {
8549 dev_err(&pdev->dev,
8550 "Cannot re-enable PCI device after reset\n");
8551 result = PCI_ERS_RESULT_DISCONNECT;
8552 } else {
8553 pci_set_master(pdev);
8554 pci_restore_state(pdev);
8555 pci_save_state(pdev);
8556
8557 if (netif_running(dev)) {
8558 bnx2_set_power_state(bp, PCI_D0);
8559 bnx2_init_nic(bp, 1);
8560 }
8561 result = PCI_ERS_RESULT_RECOVERED;
8562 }
8563 rtnl_unlock();
8564
8565 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8566 if (err) {
8567 dev_err(&pdev->dev,
8568 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8569 err); /* non-fatal, continue */
8570 }
8571
8572 return result;
8573 }
8574
8575 /**
8576 * bnx2_io_resume - called when traffic can start flowing again.
8577 * @pdev: Pointer to PCI device
8578 *
8579 * This callback is called when the error recovery driver tells us that
8580  * it's OK to resume normal operation.
8581 */
8582 static void bnx2_io_resume(struct pci_dev *pdev)
8583 {
8584 struct net_device *dev = pci_get_drvdata(pdev);
8585 struct bnx2 *bp = netdev_priv(dev);
8586
8587 rtnl_lock();
8588 if (netif_running(dev))
8589 bnx2_netif_start(bp, true);
8590
8591 netif_device_attach(dev);
8592 rtnl_unlock();
8593 }
8594
8595 static struct pci_error_handlers bnx2_err_handler = {
8596 .error_detected = bnx2_io_error_detected,
8597 .slot_reset = bnx2_io_slot_reset,
8598 .resume = bnx2_io_resume,
8599 };
8600
8601 static struct pci_driver bnx2_pci_driver = {
8602 .name = DRV_MODULE_NAME,
8603 .id_table = bnx2_pci_tbl,
8604 .probe = bnx2_init_one,
8605 .remove = __devexit_p(bnx2_remove_one),
8606 .suspend = bnx2_suspend,
8607 .resume = bnx2_resume,
8608 .err_handler = &bnx2_err_handler,
8609 };
8610
8611 static int __init bnx2_init(void)
8612 {
8613 return pci_register_driver(&bnx2_pci_driver);
8614 }
8615
8616 static void __exit bnx2_cleanup(void)
8617 {
8618 pci_unregister_driver(&bnx2_pci_driver);
8619 }
8620
8621 module_init(bnx2_init);
8622 module_exit(bnx2_cleanup);
8623
8624
8625