/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.2.3"
#define DRV_MODULE_RELDATE	"June 27, 2012"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
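	/* Worked example (hypothetical values): with 16-bit hardware
	 * indices, tx_prod = 3 and tx_cons = 0xfff0 yield a u32 diff of
	 * 0xffff0013; masking with 0xffff recovers the true distance of
	 * 0x13.  A masked diff equal to BNX2_TX_DESC_CNT means the ring
	 * is full, so it is rounded down to BNX2_MAX_TX_DESC_CNT.
	 */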
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

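/* Registers beyond the directly mapped window are accessed indirectly:
 * the target offset is first written to BNX2_PCICFG_REG_WINDOW_ADDRESS
 * and the data is then transferred through BNX2_PCICFG_REG_WINDOW.
 * indirect_lock serializes the two-step address/data sequence.
 */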
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
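		/* Poll until the chip clears WRITE_REQ, i.e. the context
		 * write has been accepted (up to ~25 us).
		 */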
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

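	/* Poll for the MDIO transaction to complete: START_BUSY clears
	 * when the read is done (up to ~500 us).
	 */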
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

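		/* Two writes: the first acknowledges last_status_idx with
		 * the interrupt still masked; the second unmasks the vector.
		 */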
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
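	/* Layout: the status block (or, with MSI-X, one block per hardware
	 * vector at BNX2_SBLK_MSIX_ALIGN_SIZE stride) comes first, followed
	 * by the statistics block.
	 */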
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
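	/* Resolution summary (local advert / partner advert -> result):
	 *   PAUSE        / PAUSE        -> TX + RX
	 *   PAUSE + ASYM / ASYM only    -> RX only
	 *   ASYM only    / PAUSE + ASYM -> TX only
	 *   anything else               -> no pause
	 */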
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

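		/* Link-partner 1000BASE-T ability bits in MII_STAT1000 sit
		 * two bit positions above the corresponding advertisement
		 * bits in MII_CTRL1000, hence the shift before masking.
		 */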
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner does not
		 * autonegotiate, which is very common in blade servers.
		 * Some blade servers use IPMI for keyboard input, and it's
		 * important to minimize link disruptions.  Autonegotiation
		 * involves exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
1836 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1837 bp->serdes_an_pending = 1;
1838 mod_timer(&bp->timer, jiffies + bp->current_interval);
1839 } else {
1840 bnx2_resolve_flow_ctrl(bp);
1841 bnx2_set_mac_link(bp);
1842 }
1843
1844 return 0;
1845 }
1846
1847 #define ETHTOOL_ALL_FIBRE_SPEED \
1848 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1849 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1850 (ADVERTISED_1000baseT_Full)
1851
1852 #define ETHTOOL_ALL_COPPER_SPEED \
1853 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1854 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1855 ADVERTISED_1000baseT_Full)
1856
1857 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1858 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1859
1860 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862 static void
1863 bnx2_set_default_remote_link(struct bnx2 *bp)
1864 {
1865 u32 link;
1866
1867 if (bp->phy_port == PORT_TP)
1868 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869 else
1870 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873 bp->req_line_speed = 0;
1874 bp->autoneg |= AUTONEG_SPEED;
1875 bp->advertising = ADVERTISED_Autoneg;
1876 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877 bp->advertising |= ADVERTISED_10baseT_Half;
1878 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879 bp->advertising |= ADVERTISED_10baseT_Full;
1880 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881 bp->advertising |= ADVERTISED_100baseT_Half;
1882 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883 bp->advertising |= ADVERTISED_100baseT_Full;
1884 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885 bp->advertising |= ADVERTISED_1000baseT_Full;
1886 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887 bp->advertising |= ADVERTISED_2500baseX_Full;
1888 } else {
1889 bp->autoneg = 0;
1890 bp->advertising = 0;
1891 bp->req_duplex = DUPLEX_FULL;
1892 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893 bp->req_line_speed = SPEED_10;
1894 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895 bp->req_duplex = DUPLEX_HALF;
1896 }
1897 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898 bp->req_line_speed = SPEED_100;
1899 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900 bp->req_duplex = DUPLEX_HALF;
1901 }
1902 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903 bp->req_line_speed = SPEED_1000;
1904 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905 bp->req_line_speed = SPEED_2500;
1906 }
1907 }
1908
1909 static void
1910 bnx2_set_default_link(struct bnx2 *bp)
1911 {
1912 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913 bnx2_set_default_remote_link(bp);
1914 return;
1915 }
1916
1917 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918 bp->req_line_speed = 0;
1919 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920 u32 reg;
1921
1922 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927 bp->autoneg = 0;
1928 bp->req_line_speed = bp->line_speed = SPEED_1000;
1929 bp->req_duplex = DUPLEX_FULL;
1930 }
1931 } else
1932 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933 }
1934
1935 static void
1936 bnx2_send_heart_beat(struct bnx2 *bp)
1937 {
1938 u32 msg;
1939 u32 addr;
1940
1941 spin_lock(&bp->indirect_lock);
1942 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946 spin_unlock(&bp->indirect_lock);
1947 }
1948
1949 static void
1950 bnx2_remote_phy_event(struct bnx2 *bp)
1951 {
1952 u32 msg;
1953 u8 link_up = bp->link_up;
1954 u8 old_port;
1955
1956 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1957
1958 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1959 bnx2_send_heart_beat(bp);
1960
1961 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1962
1963 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1964 bp->link_up = 0;
1965 else {
1966 u32 speed;
1967
1968 bp->link_up = 1;
1969 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1970 bp->duplex = DUPLEX_FULL;
1971 switch (speed) {
1972 case BNX2_LINK_STATUS_10HALF:
1973 bp->duplex = DUPLEX_HALF;
1974 /* fall through */
1975 case BNX2_LINK_STATUS_10FULL:
1976 bp->line_speed = SPEED_10;
1977 break;
1978 case BNX2_LINK_STATUS_100HALF:
1979 bp->duplex = DUPLEX_HALF;
1980 /* fall through */
1981 case BNX2_LINK_STATUS_100BASE_T4:
1982 case BNX2_LINK_STATUS_100FULL:
1983 bp->line_speed = SPEED_100;
1984 break;
1985 case BNX2_LINK_STATUS_1000HALF:
1986 bp->duplex = DUPLEX_HALF;
1987 /* fall through */
1988 case BNX2_LINK_STATUS_1000FULL:
1989 bp->line_speed = SPEED_1000;
1990 break;
1991 case BNX2_LINK_STATUS_2500HALF:
1992 bp->duplex = DUPLEX_HALF;
1993 /* fall through */
1994 case BNX2_LINK_STATUS_2500FULL:
1995 bp->line_speed = SPEED_2500;
1996 break;
1997 default:
1998 bp->line_speed = 0;
1999 break;
2000 }
2001
2002 bp->flow_ctrl = 0;
2003 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2004 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2005 if (bp->duplex == DUPLEX_FULL)
2006 bp->flow_ctrl = bp->req_flow_ctrl;
2007 } else {
2008 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2009 bp->flow_ctrl |= FLOW_CTRL_TX;
2010 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2011 bp->flow_ctrl |= FLOW_CTRL_RX;
2012 }
2013
2014 old_port = bp->phy_port;
2015 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2016 bp->phy_port = PORT_FIBRE;
2017 else
2018 bp->phy_port = PORT_TP;
2019
2020 if (old_port != bp->phy_port)
2021 bnx2_set_default_link(bp);
2022
2023 }
2024 if (bp->link_up != link_up)
2025 bnx2_report_link(bp);
2026
2027 bnx2_set_mac_link(bp);
2028 }
2029
2030 static int
2031 bnx2_set_remote_link(struct bnx2 *bp)
2032 {
2033 u32 evt_code;
2034
2035 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2036 switch (evt_code) {
2037 case BNX2_FW_EVT_CODE_LINK_EVENT:
2038 bnx2_remote_phy_event(bp);
2039 break;
2040 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2041 default:
2042 bnx2_send_heart_beat(bp);
2043 break;
2044 }
2045 return 0;
2046 }
2047
2048 static int
2049 bnx2_setup_copper_phy(struct bnx2 *bp)
2050 __releases(&bp->phy_lock)
2051 __acquires(&bp->phy_lock)
2052 {
2053 u32 bmcr;
2054 u32 new_bmcr;
2055
2056 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2057
2058 if (bp->autoneg & AUTONEG_SPEED) {
2059 u32 adv_reg, adv1000_reg;
2060 u32 new_adv = 0;
2061 u32 new_adv1000 = 0;
2062
2063 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2064 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2065 ADVERTISE_PAUSE_ASYM);
2066
2067 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2068 adv1000_reg &= PHY_ALL_1000_SPEED;
2069
2070 new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
2071 new_adv |= ADVERTISE_CSMA;
2072 new_adv |= bnx2_phy_get_pause_adv(bp);
2073
2074 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2075
2076 if ((adv1000_reg != new_adv1000) ||
2077 (adv_reg != new_adv) ||
2078 ((bmcr & BMCR_ANENABLE) == 0)) {
2079
2080 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2081 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2082 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2083 BMCR_ANENABLE);
2084 }
2085 else if (bp->link_up) {
2086 /* Flow ctrl may have changed from auto to forced
2087  * or vice-versa. */
2088
2089 bnx2_resolve_flow_ctrl(bp);
2090 bnx2_set_mac_link(bp);
2091 }
2092 return 0;
2093 }
2094
2095 new_bmcr = 0;
2096 if (bp->req_line_speed == SPEED_100) {
2097 new_bmcr |= BMCR_SPEED100;
2098 }
2099 if (bp->req_duplex == DUPLEX_FULL) {
2100 new_bmcr |= BMCR_FULLDPLX;
2101 }
2102 if (new_bmcr != bmcr) {
2103 u32 bmsr;
2104
2105 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2106 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2107
2108 if (bmsr & BMSR_LSTATUS) {
2109 /* Force link down */
2110 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2111 spin_unlock_bh(&bp->phy_lock);
2112 msleep(50);
2113 spin_lock_bh(&bp->phy_lock);
2114
2115 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2116 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2117 }
2118
2119 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2120
2121 /* Normally, the new speed is set up after the link has
2122 * gone down and up again. In some cases, link will not go
2123 * down so we need to set up the new speed here.
2124 */
2125 if (bmsr & BMSR_LSTATUS) {
2126 bp->line_speed = bp->req_line_speed;
2127 bp->duplex = bp->req_duplex;
2128 bnx2_resolve_flow_ctrl(bp);
2129 bnx2_set_mac_link(bp);
2130 }
2131 } else {
2132 bnx2_resolve_flow_ctrl(bp);
2133 bnx2_set_mac_link(bp);
2134 }
2135 return 0;
2136 }
2137
2138 static int
2139 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2140 __releases(&bp->phy_lock)
2141 __acquires(&bp->phy_lock)
2142 {
2143 if (bp->loopback == MAC_LOOPBACK)
2144 return 0;
2145
2146 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2147 return bnx2_setup_serdes_phy(bp, port);
2148 }
2149 else {
2150 return bnx2_setup_copper_phy(bp);
2151 }
2152 }
2153
2154 static int
2155 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2156 {
2157 u32 val;
2158
2159 bp->mii_bmcr = MII_BMCR + 0x10;
2160 bp->mii_bmsr = MII_BMSR + 0x10;
2161 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2162 bp->mii_adv = MII_ADVERTISE + 0x10;
2163 bp->mii_lpa = MII_LPA + 0x10;
2164 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2165
2166 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2167 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2168
2169 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2170 if (reset_phy)
2171 bnx2_reset_phy(bp);
2172
2173 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2174
2175 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2176 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2177 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2178 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2179
2180 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2181 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2182 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2183 val |= BCM5708S_UP1_2G5;
2184 else
2185 val &= ~BCM5708S_UP1_2G5;
2186 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2187
2188 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2189 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2190 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2191 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2192
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2194
2195 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2196 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2197 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2198
2199 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2200
2201 return 0;
2202 }
2203
2204 static int
2205 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2206 {
2207 u32 val;
2208
2209 if (reset_phy)
2210 bnx2_reset_phy(bp);
2211
2212 bp->mii_up1 = BCM5708S_UP1;
2213
2214 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2215 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2216 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2217
2218 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2219 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2220 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2221
2222 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2223 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2224 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2225
2226 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2227 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2228 val |= BCM5708S_UP1_2G5;
2229 bnx2_write_phy(bp, BCM5708S_UP1, val);
2230 }
2231
2232 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2233 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2234 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2235 /* increase tx signal amplitude */
2236 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2237 BCM5708S_BLK_ADDR_TX_MISC);
2238 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2239 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2240 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2241 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2242 }
2243
2244 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2245 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2246
2247 if (val) {
2248 u32 is_backplane;
2249
2250 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2251 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2252 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2253 BCM5708S_BLK_ADDR_TX_MISC);
2254 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2255 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2256 BCM5708S_BLK_ADDR_DIG);
2257 }
2258 }
2259 return 0;
2260 }
2261
2262 static int
2263 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2264 {
2265 if (reset_phy)
2266 bnx2_reset_phy(bp);
2267
2268 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2269
2270 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2271 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2272
2273 if (bp->dev->mtu > 1500) {
2274 u32 val;
2275
2276 /* Set extended packet length bit */
2277 bnx2_write_phy(bp, 0x18, 0x7);
2278 bnx2_read_phy(bp, 0x18, &val);
2279 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2280
2281 bnx2_write_phy(bp, 0x1c, 0x6c00);
2282 bnx2_read_phy(bp, 0x1c, &val);
2283 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2284 }
2285 else {
2286 u32 val;
2287
2288 bnx2_write_phy(bp, 0x18, 0x7);
2289 bnx2_read_phy(bp, 0x18, &val);
2290 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2291
2292 bnx2_write_phy(bp, 0x1c, 0x6c00);
2293 bnx2_read_phy(bp, 0x1c, &val);
2294 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2295 }
2296
2297 return 0;
2298 }
2299
2300 static int
2301 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2302 {
2303 u32 val;
2304
2305 if (reset_phy)
2306 bnx2_reset_phy(bp);
2307
2308 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2309 bnx2_write_phy(bp, 0x18, 0x0c00);
2310 bnx2_write_phy(bp, 0x17, 0x000a);
2311 bnx2_write_phy(bp, 0x15, 0x310b);
2312 bnx2_write_phy(bp, 0x17, 0x201f);
2313 bnx2_write_phy(bp, 0x15, 0x9506);
2314 bnx2_write_phy(bp, 0x17, 0x401f);
2315 bnx2_write_phy(bp, 0x15, 0x14e2);
2316 bnx2_write_phy(bp, 0x18, 0x0400);
2317 }
2318
2319 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2320 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2321 MII_BNX2_DSP_EXPAND_REG | 0x8);
2322 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2323 val &= ~(1 << 8);
2324 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2325 }
2326
2327 if (bp->dev->mtu > 1500) {
2328 /* Set extended packet length bit */
2329 bnx2_write_phy(bp, 0x18, 0x7);
2330 bnx2_read_phy(bp, 0x18, &val);
2331 bnx2_write_phy(bp, 0x18, val | 0x4000);
2332
2333 bnx2_read_phy(bp, 0x10, &val);
2334 bnx2_write_phy(bp, 0x10, val | 0x1);
2335 }
2336 else {
2337 bnx2_write_phy(bp, 0x18, 0x7);
2338 bnx2_read_phy(bp, 0x18, &val);
2339 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2340
2341 bnx2_read_phy(bp, 0x10, &val);
2342 bnx2_write_phy(bp, 0x10, val & ~0x1);
2343 }
2344
2345 /* ethernet@wirespeed */
2346 bnx2_write_phy(bp, 0x18, 0x7007);
2347 bnx2_read_phy(bp, 0x18, &val);
2348 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2349 return 0;
2350 }
2351
2352
2353 static int
2354 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2355 __releases(&bp->phy_lock)
2356 __acquires(&bp->phy_lock)
2357 {
2358 u32 val;
2359 int rc = 0;
2360
2361 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2362 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2363
2364 bp->mii_bmcr = MII_BMCR;
2365 bp->mii_bmsr = MII_BMSR;
2366 bp->mii_bmsr1 = MII_BMSR;
2367 bp->mii_adv = MII_ADVERTISE;
2368 bp->mii_lpa = MII_LPA;
2369
2370 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2371
2372 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2373 goto setup_phy;
2374
2375 bnx2_read_phy(bp, MII_PHYSID1, &val);
2376 bp->phy_id = val << 16;
2377 bnx2_read_phy(bp, MII_PHYSID2, &val);
2378 bp->phy_id |= val & 0xffff;
2379
2380 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2381 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2382 rc = bnx2_init_5706s_phy(bp, reset_phy);
2383 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2384 rc = bnx2_init_5708s_phy(bp, reset_phy);
2385 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2386 rc = bnx2_init_5709s_phy(bp, reset_phy);
2387 }
2388 else {
2389 rc = bnx2_init_copper_phy(bp, reset_phy);
2390 }
2391
2392 setup_phy:
2393 if (!rc)
2394 rc = bnx2_setup_phy(bp, bp->phy_port);
2395
2396 return rc;
2397 }
2398
2399 static int
2400 bnx2_set_mac_loopback(struct bnx2 *bp)
2401 {
2402 u32 mac_mode;
2403
2404 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2405 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2406 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2407 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2408 bp->link_up = 1;
2409 return 0;
2410 }
2411
2412 static int bnx2_test_link(struct bnx2 *);
2413
2414 static int
2415 bnx2_set_phy_loopback(struct bnx2 *bp)
2416 {
2417 u32 mac_mode;
2418 int rc, i;
2419
2420 spin_lock_bh(&bp->phy_lock);
2421 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2422 BMCR_SPEED1000);
2423 spin_unlock_bh(&bp->phy_lock);
2424 if (rc)
2425 return rc;
2426
2427 for (i = 0; i < 10; i++) {
2428 if (bnx2_test_link(bp) == 0)
2429 break;
2430 msleep(100);
2431 }
2432
2433 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2434 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2435 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2436 BNX2_EMAC_MODE_25G_MODE);
2437
2438 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2439 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440 bp->link_up = 1;
2441 return 0;
2442 }
2443
2444 static void
2445 bnx2_dump_mcp_state(struct bnx2 *bp)
2446 {
2447 struct net_device *dev = bp->dev;
2448 u32 mcp_p0, mcp_p1;
2449
2450 netdev_err(dev, "<--- start MCP states dump --->\n");
2451 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2452 mcp_p0 = BNX2_MCP_STATE_P0;
2453 mcp_p1 = BNX2_MCP_STATE_P1;
2454 } else {
2455 mcp_p0 = BNX2_MCP_STATE_P0_5708;
2456 mcp_p1 = BNX2_MCP_STATE_P1_5708;
2457 }
2458 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2459 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2460 netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2461 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2462 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2463 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2464 netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2465 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2466 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2467 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2468 netdev_err(dev, "DEBUG: shmem states:\n");
2469 netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2470 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2471 bnx2_shmem_rd(bp, BNX2_FW_MB),
2472 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2473 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2474 netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2475 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2476 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2477 pr_cont(" condition[%08x]\n",
2478 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2479 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2480 DP_SHMEM_LINE(bp, 0x3cc);
2481 DP_SHMEM_LINE(bp, 0x3dc);
2482 DP_SHMEM_LINE(bp, 0x3ec);
2483 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2484 netdev_err(dev, "<--- end MCP states dump --->\n");
2485 }
2486
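/* Annotation (not in the original source): bnx2_fw_sync() implements
 * the driver/firmware handshake.  The driver posts msg_data (tagged
 * with a sequence number) to BNX2_DRV_MB and, when an ack is
 * requested, polls BNX2_FW_MB until the firmware echoes the same
 * sequence, i.e. until
 *
 *   (val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)
 *
 * On timeout it reports BNX2_DRV_MSG_CODE_FW_TIMEOUT back to the
 * firmware and returns -EBUSY.
 */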
2487 static int
2488 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2489 {
2490 int i;
2491 u32 val;
2492
2493 bp->fw_wr_seq++;
2494 msg_data |= bp->fw_wr_seq;
2495
2496 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2497
2498 if (!ack)
2499 return 0;
2500
2501 /* wait for an acknowledgement. */
2502 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2503 msleep(10);
2504
2505 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2506
2507 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2508 break;
2509 }
2510 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2511 return 0;
2512
2513 /* If we timed out, inform the firmware that this is the case. */
2514 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2515 msg_data &= ~BNX2_DRV_MSG_CODE;
2516 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2517
2518 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2519 if (!silent) {
2520 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2521 bnx2_dump_mcp_state(bp);
2522 }
2523
2524 return -EBUSY;
2525 }
2526
2527 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2528 return -EIO;
2529
2530 return 0;
2531 }
2532
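/* Annotation (not in the original source): on the 5709, context
 * memory lives in host DMA pages.  Each page is registered by
 * writing the low/high halves of its bus address to the
 * HOST_PAGE_TBL_DATA registers, then committing entry i with the
 * WRITE_REQ bit; the bit self-clears once the hardware has latched
 * the entry, hence the short polling loops with udelay().
 */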
2533 static int
2534 bnx2_init_5709_context(struct bnx2 *bp)
2535 {
2536 int i, ret = 0;
2537 u32 val;
2538
2539 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2540 val |= (BNX2_PAGE_BITS - 8) << 16;
2541 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2542 for (i = 0; i < 10; i++) {
2543 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2544 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2545 break;
2546 udelay(2);
2547 }
2548 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2549 return -EBUSY;
2550
2551 for (i = 0; i < bp->ctx_pages; i++) {
2552 int j;
2553
2554 if (bp->ctx_blk[i])
2555 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2556 else
2557 return -ENOMEM;
2558
2559 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2560 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2561 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2562 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2563 (u64) bp->ctx_blk_mapping[i] >> 32);
2564 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2565 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2566 for (j = 0; j < 10; j++) {
2567
2568 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2569 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2570 break;
2571 udelay(5);
2572 }
2573 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2574 ret = -EBUSY;
2575 break;
2576 }
2577 }
2578 return ret;
2579 }
2580
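/* Annotation (not in the original source): worked example of the
 * 5706 A0 remap below.  For vcid 0x0b, (vcid & 0x8) is set, so
 *
 *   new_vcid = 0x60 + (0x0b & 0xf0) + (0x0b & 0x7)
 *            = 0x60 + 0x00 + 0x03 = 0x63
 *
 * i.e. the upper half of each group of 16 context IDs is relocated
 * above 0x60 before the physical CID address is computed.
 */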
2581 static void
2582 bnx2_init_context(struct bnx2 *bp)
2583 {
2584 u32 vcid;
2585
2586 vcid = 96;
2587 while (vcid) {
2588 u32 vcid_addr, pcid_addr, offset;
2589 int i;
2590
2591 vcid--;
2592
2593 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2594 u32 new_vcid;
2595
2596 vcid_addr = GET_PCID_ADDR(vcid);
2597 if (vcid & 0x8) {
2598 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2599 }
2600 else {
2601 new_vcid = vcid;
2602 }
2603 pcid_addr = GET_PCID_ADDR(new_vcid);
2604 }
2605 else {
2606 vcid_addr = GET_CID_ADDR(vcid);
2607 pcid_addr = vcid_addr;
2608 }
2609
2610 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2611 vcid_addr += (i << PHY_CTX_SHIFT);
2612 pcid_addr += (i << PHY_CTX_SHIFT);
2613
2614 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2615 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2616
2617 /* Zero out the context. */
2618 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2619 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2620 }
2621 }
2622 }
2623
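/* Annotation (not in the original source): when a buffer is handed
 * back below, the cluster number is packed into both halves of the
 * write plus a valid bit.  E.g. for good_mbuf[n] == 0x10:
 *
 *   val = (0x10 << 9) | 0x10 | 1 = 0x2011
 */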
2624 static int
2625 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2626 {
2627 u16 *good_mbuf;
2628 u32 good_mbuf_cnt;
2629 u32 val;
2630
2631 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2632 if (good_mbuf == NULL)
2633 return -ENOMEM;
2634
2635 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2636 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2637
2638 good_mbuf_cnt = 0;
2639
2640 /* Allocate a bunch of mbufs and save the good ones in an array. */
2641 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2642 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2643 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2644 BNX2_RBUF_COMMAND_ALLOC_REQ);
2645
2646 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2647
2648 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2649
2650 /* The addresses with Bit 9 set are bad memory blocks. */
2651 if (!(val & (1 << 9))) {
2652 good_mbuf[good_mbuf_cnt] = (u16) val;
2653 good_mbuf_cnt++;
2654 }
2655
2656 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2657 }
2658
2659 /* Free the good ones back to the mbuf pool, thus discarding
2660 * all the bad ones. */
2661 while (good_mbuf_cnt) {
2662 good_mbuf_cnt--;
2663
2664 val = good_mbuf[good_mbuf_cnt];
2665 val = (val << 9) | val | 1;
2666
2667 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2668 }
2669 kfree(good_mbuf);
2670 return 0;
2671 }
2672
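/* Annotation (not in the original source): the 6-byte MAC address is
 * split across a register pair, 2 bytes in MATCH0 and 4 in MATCH1,
 * with 'pos' selecting one of the perfect-match slots (8 bytes of
 * register space each).  E.g. the hypothetical address
 * 00:10:18:aa:bb:cc would be written as MATCH0 = 0x00000010 and
 * MATCH1 = 0x18aabbcc.
 */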
2673 static void
2674 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2675 {
2676 u32 val;
2677
2678 val = (mac_addr[0] << 8) | mac_addr[1];
2679
2680 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2681
2682 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2683 (mac_addr[4] << 8) | mac_addr[5];
2684
2685 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2686 }
2687
2688 static inline int
2689 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2690 {
2691 dma_addr_t mapping;
2692 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2693 struct bnx2_rx_bd *rxbd =
2694 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2695 struct page *page = alloc_page(gfp);
2696
2697 if (!page)
2698 return -ENOMEM;
2699 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2700 PCI_DMA_FROMDEVICE);
2701 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2702 __free_page(page);
2703 return -EIO;
2704 }
2705
2706 rx_pg->page = page;
2707 dma_unmap_addr_set(rx_pg, mapping, mapping);
2708 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2709 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2710 return 0;
2711 }
2712
2713 static void
2714 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2715 {
2716 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2717 struct page *page = rx_pg->page;
2718
2719 if (!page)
2720 return;
2721
2722 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2723 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2724
2725 __free_page(page);
2726 rx_pg->page = NULL;
2727 }
2728
2729 static inline int
2730 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2731 {
2732 u8 *data;
2733 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2734 dma_addr_t mapping;
2735 struct bnx2_rx_bd *rxbd =
2736 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2737
2738 data = kmalloc(bp->rx_buf_size, gfp);
2739 if (!data)
2740 return -ENOMEM;
2741
2742 mapping = dma_map_single(&bp->pdev->dev,
2743 get_l2_fhdr(data),
2744 bp->rx_buf_use_size,
2745 PCI_DMA_FROMDEVICE);
2746 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2747 kfree(data);
2748 return -EIO;
2749 }
2750
2751 rx_buf->data = data;
2752 dma_unmap_addr_set(rx_buf, mapping, mapping);
2753
2754 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2755 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2756
2757 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2758
2759 return 0;
2760 }
2761
2762 static int
2763 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2764 {
2765 struct status_block *sblk = bnapi->status_blk.msi;
2766 u32 new_link_state, old_link_state;
2767 int is_set = 1;
2768
2769 new_link_state = sblk->status_attn_bits & event;
2770 old_link_state = sblk->status_attn_bits_ack & event;
2771 if (new_link_state != old_link_state) {
2772 if (new_link_state)
2773 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2774 else
2775 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2776 } else
2777 is_set = 0;
2778
2779 return is_set;
2780 }
2781
2782 static void
2783 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2784 {
2785 spin_lock(&bp->phy_lock);
2786
2787 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2788 bnx2_set_link(bp);
2789 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2790 bnx2_set_remote_link(bp);
2791
2792 spin_unlock(&bp->phy_lock);
2793
2794 }
2795
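/* Annotation (not in the original source): the last descriptor of
 * each ring page is reserved as a chain pointer, so a consumer index
 * whose low bits equal BNX2_MAX_TX_DESC_CNT is bumped past it.
 * Sketch, assuming a 256-entry page (BNX2_MAX_TX_DESC_CNT == 0xff):
 * a raw index of 0x00ff is returned as 0x0100.
 */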
2796 static inline u16
2797 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2798 {
2799 u16 cons;
2800
2801 /* Tell compiler that status block fields can change. */
2802 barrier();
2803 cons = *bnapi->hw_tx_cons_ptr;
2804 barrier();
2805 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2806 cons++;
2807 return cons;
2808 }
2809
2810 static int
2811 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2812 {
2813 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2814 u16 hw_cons, sw_cons, sw_ring_cons;
2815 int tx_pkt = 0, index;
2816 unsigned int tx_bytes = 0;
2817 struct netdev_queue *txq;
2818
2819 index = (bnapi - bp->bnx2_napi);
2820 txq = netdev_get_tx_queue(bp->dev, index);
2821
2822 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2823 sw_cons = txr->tx_cons;
2824
2825 while (sw_cons != hw_cons) {
2826 struct bnx2_sw_tx_bd *tx_buf;
2827 struct sk_buff *skb;
2828 int i, last;
2829
2830 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2831
2832 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2833 skb = tx_buf->skb;
2834
2835 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2836 prefetch(&skb->end);
2837
2838 /* partial BD completions possible with TSO packets */
2839 if (tx_buf->is_gso) {
2840 u16 last_idx, last_ring_idx;
2841
2842 last_idx = sw_cons + tx_buf->nr_frags + 1;
2843 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2844 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2845 last_idx++;
2846 }
2847 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2848 break;
2849 }
2850 }
2851
2852 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2853 skb_headlen(skb), PCI_DMA_TODEVICE);
2854
2855 tx_buf->skb = NULL;
2856 last = tx_buf->nr_frags;
2857
2858 for (i = 0; i < last; i++) {
2859 struct bnx2_sw_tx_bd *tx_buf;
2860
2861 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2862
2863 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2864 dma_unmap_page(&bp->pdev->dev,
2865 dma_unmap_addr(tx_buf, mapping),
2866 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2867 PCI_DMA_TODEVICE);
2868 }
2869
2870 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2871
2872 tx_bytes += skb->len;
2873 dev_kfree_skb(skb);
2874 tx_pkt++;
2875 if (tx_pkt == budget)
2876 break;
2877
2878 if (hw_cons == sw_cons)
2879 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2880 }
2881
2882 netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2883 txr->hw_tx_cons = hw_cons;
2884 txr->tx_cons = sw_cons;
2885
2886 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2887 * before checking for netif_tx_queue_stopped(). Without the
2888 * memory barrier, there is a small possibility that bnx2_start_xmit()
2889 * will miss it and cause the queue to be stopped forever.
2890 */
2891 smp_mb();
2892
2893 if (unlikely(netif_tx_queue_stopped(txq)) &&
2894 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2895 __netif_tx_lock(txq, smp_processor_id());
2896 if ((netif_tx_queue_stopped(txq)) &&
2897 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2898 netif_tx_wake_queue(txq);
2899 __netif_tx_unlock(txq);
2900 }
2901
2902 return tx_pkt;
2903 }
2904
2905 static void
2906 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2907 struct sk_buff *skb, int count)
2908 {
2909 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2910 struct bnx2_rx_bd *cons_bd, *prod_bd;
2911 int i;
2912 u16 hw_prod, prod;
2913 u16 cons = rxr->rx_pg_cons;
2914
2915 cons_rx_pg = &rxr->rx_pg_ring[cons];
2916
2917 /* The caller was unable to allocate a new page to replace the
2918 * last one in the frags array, so we need to recycle that page
2919 * and then free the skb.
2920 */
2921 if (skb) {
2922 struct page *page;
2923 struct skb_shared_info *shinfo;
2924
2925 shinfo = skb_shinfo(skb);
2926 shinfo->nr_frags--;
2927 page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2928 __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2929
2930 cons_rx_pg->page = page;
2931 dev_kfree_skb(skb);
2932 }
2933
2934 hw_prod = rxr->rx_pg_prod;
2935
2936 for (i = 0; i < count; i++) {
2937 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2938
2939 prod_rx_pg = &rxr->rx_pg_ring[prod];
2940 cons_rx_pg = &rxr->rx_pg_ring[cons];
2941 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2942 [BNX2_RX_IDX(cons)];
2943 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2944 [BNX2_RX_IDX(prod)];
2945
2946 if (prod != cons) {
2947 prod_rx_pg->page = cons_rx_pg->page;
2948 cons_rx_pg->page = NULL;
2949 dma_unmap_addr_set(prod_rx_pg, mapping,
2950 dma_unmap_addr(cons_rx_pg, mapping));
2951
2952 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2953 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2954
2955 }
2956 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2957 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2958 }
2959 rxr->rx_pg_prod = hw_prod;
2960 rxr->rx_pg_cons = cons;
2961 }
2962
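/* Annotation (not in the original source): recycle path used when a
 * replacement rx buffer cannot be allocated.  Only the small header
 * region (BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH) is synced back to
 * the device, and the DMA mapping is moved from the consumer slot to
 * the producer slot rather than unmapped and remapped.
 */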
2963 static inline void
2964 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2965 u8 *data, u16 cons, u16 prod)
2966 {
2967 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2968 struct bnx2_rx_bd *cons_bd, *prod_bd;
2969
2970 cons_rx_buf = &rxr->rx_buf_ring[cons];
2971 prod_rx_buf = &rxr->rx_buf_ring[prod];
2972
2973 dma_sync_single_for_device(&bp->pdev->dev,
2974 dma_unmap_addr(cons_rx_buf, mapping),
2975 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2976
2977 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2978
2979 prod_rx_buf->data = data;
2980
2981 if (cons == prod)
2982 return;
2983
2984 dma_unmap_addr_set(prod_rx_buf, mapping,
2985 dma_unmap_addr(cons_rx_buf, mapping));
2986
2987 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2988 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2989 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2990 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2991 }
2992
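/* Annotation (not in the original source): the ring_idx argument
 * packs two ring positions into one u32 - producer in the low 16
 * bits, consumer in the high 16 - matching the
 * (sw_ring_cons << 16) | sw_ring_prod caller in bnx2_rx_int():
 *
 *   prod = ring_idx & 0xffff;
 *   cons = (u16) (ring_idx >> 16);
 */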
2993 static struct sk_buff *
2994 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
2995 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2996 u32 ring_idx)
2997 {
2998 int err;
2999 u16 prod = ring_idx & 0xffff;
3000 struct sk_buff *skb;
3001
3002 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3003 if (unlikely(err)) {
3004 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3005 error:
3006 if (hdr_len) {
3007 unsigned int raw_len = len + 4;
3008 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3009
3010 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3011 }
3012 return NULL;
3013 }
3014
3015 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3016 PCI_DMA_FROMDEVICE);
3017 skb = build_skb(data, 0);
3018 if (!skb) {
3019 kfree(data);
3020 goto error;
3021 }
3022 skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3023 if (hdr_len == 0) {
3024 skb_put(skb, len);
3025 return skb;
3026 } else {
3027 unsigned int i, frag_len, frag_size, pages;
3028 struct bnx2_sw_pg *rx_pg;
3029 u16 pg_cons = rxr->rx_pg_cons;
3030 u16 pg_prod = rxr->rx_pg_prod;
3031
3032 frag_size = len + 4 - hdr_len;
3033 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3034 skb_put(skb, hdr_len);
3035
3036 for (i = 0; i < pages; i++) {
3037 dma_addr_t mapping_old;
3038
3039 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3040 if (unlikely(frag_len <= 4)) {
3041 unsigned int tail = 4 - frag_len;
3042
3043 rxr->rx_pg_cons = pg_cons;
3044 rxr->rx_pg_prod = pg_prod;
3045 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3046 pages - i);
3047 skb->len -= tail;
3048 if (i == 0) {
3049 skb->tail -= tail;
3050 } else {
3051 skb_frag_t *frag =
3052 &skb_shinfo(skb)->frags[i - 1];
3053 skb_frag_size_sub(frag, tail);
3054 skb->data_len -= tail;
3055 }
3056 return skb;
3057 }
3058 rx_pg = &rxr->rx_pg_ring[pg_cons];
3059
3060 /* Don't unmap yet. If we're unable to allocate a new
3061 * page, we need to recycle the page and the DMA addr.
3062 */
3063 mapping_old = dma_unmap_addr(rx_pg, mapping);
3064 if (i == pages - 1)
3065 frag_len -= 4;
3066
3067 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3068 rx_pg->page = NULL;
3069
3070 err = bnx2_alloc_rx_page(bp, rxr,
3071 BNX2_RX_PG_RING_IDX(pg_prod),
3072 GFP_ATOMIC);
3073 if (unlikely(err)) {
3074 rxr->rx_pg_cons = pg_cons;
3075 rxr->rx_pg_prod = pg_prod;
3076 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3077 pages - i);
3078 return NULL;
3079 }
3080
3081 dma_unmap_page(&bp->pdev->dev, mapping_old,
3082 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3083
3084 frag_size -= frag_len;
3085 skb->data_len += frag_len;
3086 skb->truesize += PAGE_SIZE;
3087 skb->len += frag_len;
3088
3089 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3090 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3091 }
3092 rxr->rx_pg_prod = pg_prod;
3093 rxr->rx_pg_cons = pg_cons;
3094 }
3095 return skb;
3096 }
3097
3098 static inline u16
3099 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3100 {
3101 u16 cons;
3102
3103 /* Tell compiler that status block fields can change. */
3104 barrier();
3105 cons = *bnapi->hw_rx_cons_ptr;
3106 barrier();
3107 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3108 cons++;
3109 return cons;
3110 }
3111
3112 static int
3113 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3114 {
3115 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3116 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3117 struct l2_fhdr *rx_hdr;
3118 int rx_pkt = 0, pg_ring_used = 0;
3119
3120 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3121 sw_cons = rxr->rx_cons;
3122 sw_prod = rxr->rx_prod;
3123
3124 /* Memory barrier necessary as speculative reads of the rx
3125 * buffer can be ahead of the index in the status block
3126 */
3127 rmb();
3128 while (sw_cons != hw_cons) {
3129 unsigned int len, hdr_len;
3130 u32 status;
3131 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3132 struct sk_buff *skb;
3133 dma_addr_t dma_addr;
3134 u8 *data;
3135 u16 next_ring_idx;
3136
3137 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3138 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3139
3140 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3141 data = rx_buf->data;
3142 rx_buf->data = NULL;
3143
3144 rx_hdr = get_l2_fhdr(data);
3145 prefetch(rx_hdr);
3146
3147 dma_addr = dma_unmap_addr(rx_buf, mapping);
3148
3149 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3150 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3151 PCI_DMA_FROMDEVICE);
3152
3153 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3154 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3155 prefetch(get_l2_fhdr(next_rx_buf->data));
3156
3157 len = rx_hdr->l2_fhdr_pkt_len;
3158 status = rx_hdr->l2_fhdr_status;
3159
3160 hdr_len = 0;
3161 if (status & L2_FHDR_STATUS_SPLIT) {
3162 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3163 pg_ring_used = 1;
3164 } else if (len > bp->rx_jumbo_thresh) {
3165 hdr_len = bp->rx_jumbo_thresh;
3166 pg_ring_used = 1;
3167 }
3168
3169 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3170 L2_FHDR_ERRORS_PHY_DECODE |
3171 L2_FHDR_ERRORS_ALIGNMENT |
3172 L2_FHDR_ERRORS_TOO_SHORT |
3173 L2_FHDR_ERRORS_GIANT_FRAME))) {
3174
3175 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3176 sw_ring_prod);
3177 if (pg_ring_used) {
3178 int pages;
3179
3180 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3181
3182 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3183 }
3184 goto next_rx;
3185 }
3186
3187 len -= 4;
3188
3189 if (len <= bp->rx_copy_thresh) {
3190 skb = netdev_alloc_skb(bp->dev, len + 6);
3191 if (skb == NULL) {
3192 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3193 sw_ring_prod);
3194 goto next_rx;
3195 }
3196
3197 /* aligned copy */
3198 memcpy(skb->data,
3199 (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3200 len + 6);
3201 skb_reserve(skb, 6);
3202 skb_put(skb, len);
3203
3204 bnx2_reuse_rx_data(bp, rxr, data,
3205 sw_ring_cons, sw_ring_prod);
3206
3207 } else {
3208 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3209 (sw_ring_cons << 16) | sw_ring_prod);
3210 if (!skb)
3211 goto next_rx;
3212 }
3213 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3214 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3215 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3216
3217 skb->protocol = eth_type_trans(skb, bp->dev);
3218
3219 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3220 (ntohs(skb->protocol) != 0x8100)) {
3221
3222 dev_kfree_skb(skb);
3223 goto next_rx;
3224
3225 }
3226
3227 skb_checksum_none_assert(skb);
3228 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3229 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3230 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3231
3232 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3233 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3234 skb->ip_summed = CHECKSUM_UNNECESSARY;
3235 }
3236 if ((bp->dev->features & NETIF_F_RXHASH) &&
3237 ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3238 L2_FHDR_STATUS_USE_RXHASH))
3239 skb->rxhash = rx_hdr->l2_fhdr_hash;
3240
3241 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3242 napi_gro_receive(&bnapi->napi, skb);
3243 rx_pkt++;
3244
3245 next_rx:
3246 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3247 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3248
3249 if (rx_pkt == budget)
3250 break;
3251
3252 /* Refresh hw_cons to see if there is new work */
3253 if (sw_cons == hw_cons) {
3254 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3255 rmb();
3256 }
3257 }
3258 rxr->rx_cons = sw_cons;
3259 rxr->rx_prod = sw_prod;
3260
3261 if (pg_ring_used)
3262 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3263
3264 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3265
3266 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3267
3268 mmiowb();
3269
3270 return rx_pkt;
3271
3272 }
3273
3274 /* MSI ISR - The only difference between this and the INTx ISR
3275 * is that the MSI interrupt is always serviced.
3276 */
3277 static irqreturn_t
3278 bnx2_msi(int irq, void *dev_instance)
3279 {
3280 struct bnx2_napi *bnapi = dev_instance;
3281 struct bnx2 *bp = bnapi->bp;
3282
3283 prefetch(bnapi->status_blk.msi);
3284 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3285 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3286 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3287
3288 /* Return here if interrupt is disabled. */
3289 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3290 return IRQ_HANDLED;
3291
3292 napi_schedule(&bnapi->napi);
3293
3294 return IRQ_HANDLED;
3295 }
3296
3297 static irqreturn_t
3298 bnx2_msi_1shot(int irq, void *dev_instance)
3299 {
3300 struct bnx2_napi *bnapi = dev_instance;
3301 struct bnx2 *bp = bnapi->bp;
3302
3303 prefetch(bnapi->status_blk.msi);
3304
3305 /* Return here if interrupt is disabled. */
3306 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3307 return IRQ_HANDLED;
3308
3309 napi_schedule(&bnapi->napi);
3310
3311 return IRQ_HANDLED;
3312 }
3313
3314 static irqreturn_t
3315 bnx2_interrupt(int irq, void *dev_instance)
3316 {
3317 struct bnx2_napi *bnapi = dev_instance;
3318 struct bnx2 *bp = bnapi->bp;
3319 struct status_block *sblk = bnapi->status_blk.msi;
3320
3321 /* When using INTx, it is possible for the interrupt to arrive
3322  * at the CPU before the status block write posted prior to the
3323  * interrupt. Reading a register will flush the status block.
3324 * When using MSI, the MSI message will always complete after
3325 * the status block write.
3326 */
3327 if ((sblk->status_idx == bnapi->last_status_idx) &&
3328 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3329 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3330 return IRQ_NONE;
3331
3332 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3333 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3334 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3335
3336 /* Read back to deassert IRQ immediately to avoid too many
3337 * spurious interrupts.
3338 */
3339 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3340
3341 /* Return here if interrupt is shared and is disabled. */
3342 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3343 return IRQ_HANDLED;
3344
3345 if (napi_schedule_prep(&bnapi->napi)) {
3346 bnapi->last_status_idx = sblk->status_idx;
3347 __napi_schedule(&bnapi->napi);
3348 }
3349
3350 return IRQ_HANDLED;
3351 }
3352
3353 static inline int
3354 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3355 {
3356 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3357 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3358
3359 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3360 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3361 return 1;
3362 return 0;
3363 }
3364
3365 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3366 STATUS_ATTN_BITS_TIMER_ABORT)
3367
3368 static inline int
3369 bnx2_has_work(struct bnx2_napi *bnapi)
3370 {
3371 struct status_block *sblk = bnapi->status_blk.msi;
3372
3373 if (bnx2_has_fast_work(bnapi))
3374 return 1;
3375
3376 #ifdef BCM_CNIC
3377 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3378 return 1;
3379 #endif
3380
3381 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3382 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3383 return 1;
3384
3385 return 0;
3386 }
3387
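/* Annotation (not in the original source): workaround for MSI
 * messages that can occasionally be lost.  If work is pending but
 * the status index has not moved since the previous idle check, the
 * MSI enable bit is toggled off and back on and the MSI handler is
 * called directly to reschedule NAPI.
 */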
3388 static void
3389 bnx2_chk_missed_msi(struct bnx2 *bp)
3390 {
3391 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3392 u32 msi_ctrl;
3393
3394 if (bnx2_has_work(bnapi)) {
3395 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3396 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3397 return;
3398
3399 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3400 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3401 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3402 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3403 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3404 }
3405 }
3406
3407 bp->idle_chk_status_idx = bnapi->last_status_idx;
3408 }
3409
3410 #ifdef BCM_CNIC
3411 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3412 {
3413 struct cnic_ops *c_ops;
3414
3415 if (!bnapi->cnic_present)
3416 return;
3417
3418 rcu_read_lock();
3419 c_ops = rcu_dereference(bp->cnic_ops);
3420 if (c_ops)
3421 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3422 bnapi->status_blk.msi);
3423 rcu_read_unlock();
3424 }
3425 #endif
3426
3427 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3428 {
3429 struct status_block *sblk = bnapi->status_blk.msi;
3430 u32 status_attn_bits = sblk->status_attn_bits;
3431 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3432
3433 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3434 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3435
3436 bnx2_phy_int(bp, bnapi);
3437
3438 /* This is needed to take care of transient status
3439 * during link changes.
3440 */
3441 BNX2_WR(bp, BNX2_HC_COMMAND,
3442 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3443 BNX2_RD(bp, BNX2_HC_COMMAND);
3444 }
3445 }
3446
3447 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3448 int work_done, int budget)
3449 {
3450 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3451 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3452
3453 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3454 bnx2_tx_int(bp, bnapi, 0);
3455
3456 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3457 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3458
3459 return work_done;
3460 }
3461
3462 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3463 {
3464 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3465 struct bnx2 *bp = bnapi->bp;
3466 int work_done = 0;
3467 struct status_block_msix *sblk = bnapi->status_blk.msix;
3468
3469 while (1) {
3470 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3471 if (unlikely(work_done >= budget))
3472 break;
3473
3474 bnapi->last_status_idx = sblk->status_idx;
3475 /* status idx must be read before checking for more work. */
3476 rmb();
3477 if (likely(!bnx2_has_fast_work(bnapi))) {
3478
3479 napi_complete(napi);
3480 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3481 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3482 bnapi->last_status_idx);
3483 break;
3484 }
3485 }
3486 return work_done;
3487 }
3488
3489 static int bnx2_poll(struct napi_struct *napi, int budget)
3490 {
3491 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3492 struct bnx2 *bp = bnapi->bp;
3493 int work_done = 0;
3494 struct status_block *sblk = bnapi->status_blk.msi;
3495
3496 while (1) {
3497 bnx2_poll_link(bp, bnapi);
3498
3499 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3500
3501 #ifdef BCM_CNIC
3502 bnx2_poll_cnic(bp, bnapi);
3503 #endif
3504
3505 /* bnapi->last_status_idx is used below to tell the hw how
3506 * much work has been processed, so we must read it before
3507 * checking for more work.
3508 */
3509 bnapi->last_status_idx = sblk->status_idx;
3510
3511 if (unlikely(work_done >= budget))
3512 break;
3513
3514 rmb();
3515 if (likely(!bnx2_has_work(bnapi))) {
3516 napi_complete(napi);
3517 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3518 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3519 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3520 bnapi->last_status_idx);
3521 break;
3522 }
3523 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3524 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3525 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3526 bnapi->last_status_idx);
3527
3528 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3529 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3530 bnapi->last_status_idx);
3531 break;
3532 }
3533 }
3534
3535 return work_done;
3536 }
3537
3538 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3539 * from set_multicast.
3540 */
3541 static void
3542 bnx2_set_rx_mode(struct net_device *dev)
3543 {
3544 struct bnx2 *bp = netdev_priv(dev);
3545 u32 rx_mode, sort_mode;
3546 struct netdev_hw_addr *ha;
3547 int i;
3548
3549 if (!netif_running(dev))
3550 return;
3551
3552 spin_lock_bh(&bp->phy_lock);
3553
3554 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3555 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3556 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3557 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3558 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3559 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3560 if (dev->flags & IFF_PROMISC) {
3561 /* Promiscuous mode. */
3562 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3563 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3564 BNX2_RPM_SORT_USER0_PROM_VLAN;
3565 }
3566 else if (dev->flags & IFF_ALLMULTI) {
3567 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3568 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3569 0xffffffff);
3570 }
3571 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3572 }
3573 else {
3574 /* Accept one or more multicast(s). */
3575 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3576 u32 regidx;
3577 u32 bit;
3578 u32 crc;
3579
3580 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3581
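/* Annotation (not in the original source): worked example of the
 * hash below.  If ether_crc_le() yields a CRC whose low byte is
 * 0x6d, then bit = 0x6d, regidx = (0x6d & 0xe0) >> 5 = 3 and
 * bit &= 0x1f leaves 13, so mc_filter[3] gets bit 13 set.
 */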
3582 netdev_for_each_mc_addr(ha, dev) {
3583 crc = ether_crc_le(ETH_ALEN, ha->addr);
3584 bit = crc & 0xff;
3585 regidx = (bit & 0xe0) >> 5;
3586 bit &= 0x1f;
3587 mc_filter[regidx] |= (1 << bit);
3588 }
3589
3590 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3591 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3592 mc_filter[i]);
3593 }
3594
3595 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3596 }
3597
3598 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3599 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3600 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3601 BNX2_RPM_SORT_USER0_PROM_VLAN;
3602 } else if (!(dev->flags & IFF_PROMISC)) {
3603 /* Add all entries into the match filter list */
3604 i = 0;
3605 netdev_for_each_uc_addr(ha, dev) {
3606 bnx2_set_mac_addr(bp, ha->addr,
3607 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3608 sort_mode |= (1 <<
3609 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3610 i++;
3611 }
3612
3613 }
3614
3615 if (rx_mode != bp->rx_mode) {
3616 bp->rx_mode = rx_mode;
3617 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3618 }
3619
3620 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3621 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3622 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3623
3624 spin_unlock_bh(&bp->phy_lock);
3625 }
3626
3627 static int
3628 check_fw_section(const struct firmware *fw,
3629 const struct bnx2_fw_file_section *section,
3630 u32 alignment, bool non_empty)
3631 {
3632 u32 offset = be32_to_cpu(section->offset);
3633 u32 len = be32_to_cpu(section->len);
3634
3635 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3636 return -EINVAL;
3637 if ((non_empty && len == 0) || len > fw->size - offset ||
3638 len & (alignment - 1))
3639 return -EINVAL;
3640 return 0;
3641 }
3642
3643 static int
3644 check_mips_fw_entry(const struct firmware *fw,
3645 const struct bnx2_mips_fw_file_entry *entry)
3646 {
3647 if (check_fw_section(fw, &entry->text, 4, true) ||
3648 check_fw_section(fw, &entry->data, 4, false) ||
3649 check_fw_section(fw, &entry->rodata, 4, false))
3650 return -EINVAL;
3651 return 0;
3652 }
3653
3654 static void bnx2_release_firmware(struct bnx2 *bp)
3655 {
3656 if (bp->rv2p_firmware) {
3657 release_firmware(bp->mips_firmware);
3658 release_firmware(bp->rv2p_firmware);
3659 bp->rv2p_firmware = NULL;
3660 }
3661 }
3662
3663 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3664 {
3665 const char *mips_fw_file, *rv2p_fw_file;
3666 const struct bnx2_mips_fw_file *mips_fw;
3667 const struct bnx2_rv2p_fw_file *rv2p_fw;
3668 int rc;
3669
3670 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3671 mips_fw_file = FW_MIPS_FILE_09;
3672 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3673 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3674 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3675 else
3676 rv2p_fw_file = FW_RV2P_FILE_09;
3677 } else {
3678 mips_fw_file = FW_MIPS_FILE_06;
3679 rv2p_fw_file = FW_RV2P_FILE_06;
3680 }
3681
3682 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3683 if (rc) {
3684 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3685 goto out;
3686 }
3687
3688 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3689 if (rc) {
3690 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3691 goto err_release_mips_firmware;
3692 }
3693 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3694 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3695 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3696 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3697 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3698 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3699 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3700 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3701 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3702 rc = -EINVAL;
3703 goto err_release_firmware;
3704 }
3705 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3706 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3707 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3708 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3709 rc = -EINVAL;
3710 goto err_release_firmware;
3711 }
3712 out:
3713 return rc;
3714
3715 err_release_firmware:
3716 release_firmware(bp->rv2p_firmware);
3717 bp->rv2p_firmware = NULL;
3718 err_release_mips_firmware:
3719 release_firmware(bp->mips_firmware);
3720 goto out;
3721 }
3722
3723 static int bnx2_request_firmware(struct bnx2 *bp)
3724 {
3725 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3726 }
3727
3728 static u32
3729 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3730 {
3731 switch (idx) {
3732 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3733 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3734 rv2p_code |= RV2P_BD_PAGE_SIZE;
3735 break;
3736 }
3737 return rv2p_code;
3738 }
3739
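/* Annotation (not in the original source): RV2P instructions are 64
 * bits wide, so the loader writes each pair of 32-bit words to
 * INSTR_HIGH/INSTR_LOW and commits them via the ADDR_CMD register at
 * index i / 8 (one command per 8 bytes).  The fixup pass reuses the
 * same scheme, which is why a patch at word 'loc' commits address
 * loc / 2.
 */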
3740 static int
3741 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3742 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3743 {
3744 u32 rv2p_code_len, file_offset;
3745 __be32 *rv2p_code;
3746 int i;
3747 u32 val, cmd, addr;
3748
3749 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3750 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3751
3752 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3753
3754 if (rv2p_proc == RV2P_PROC1) {
3755 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3756 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3757 } else {
3758 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3759 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3760 }
3761
3762 for (i = 0; i < rv2p_code_len; i += 8) {
3763 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3764 rv2p_code++;
3765 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3766 rv2p_code++;
3767
3768 val = (i / 8) | cmd;
3769 BNX2_WR(bp, addr, val);
3770 }
3771
3772 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3773 for (i = 0; i < 8; i++) {
3774 u32 loc, code;
3775
3776 loc = be32_to_cpu(fw_entry->fixup[i]);
3777 if (loc && ((loc * 4) < rv2p_code_len)) {
3778 code = be32_to_cpu(*(rv2p_code + loc - 1));
3779 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3780 code = be32_to_cpu(*(rv2p_code + loc));
3781 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3782 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3783
3784 val = (loc / 2) | cmd;
3785 BNX2_WR(bp, addr, val);
3786 }
3787 }
3788
3789 /* Reset the processor; un-stall is done later. */
3790 if (rv2p_proc == RV2P_PROC1) {
3791 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3792 }
3793 else {
3794 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3795 }
3796
3797 return 0;
3798 }
3799
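/* Annotation (not in the original source): standard load sequence
 * for one of the on-chip MIPS processors - halt the CPU, copy the
 * text, data and rodata sections into its scratchpad (addresses
 * translated with spad_base + (addr - mips_view_base)), point the
 * PC at start_addr, then clear the halt bit to let it run.
 */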
3800 static int
3801 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3802 const struct bnx2_mips_fw_file_entry *fw_entry)
3803 {
3804 u32 addr, len, file_offset;
3805 __be32 *data;
3806 u32 offset;
3807 u32 val;
3808
3809 /* Halt the CPU. */
3810 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3811 val |= cpu_reg->mode_value_halt;
3812 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3813 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3814
3815 /* Load the Text area. */
3816 addr = be32_to_cpu(fw_entry->text.addr);
3817 len = be32_to_cpu(fw_entry->text.len);
3818 file_offset = be32_to_cpu(fw_entry->text.offset);
3819 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3820
3821 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3822 if (len) {
3823 int j;
3824
3825 for (j = 0; j < (len / 4); j++, offset += 4)
3826 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3827 }
3828
3829 /* Load the Data area. */
3830 addr = be32_to_cpu(fw_entry->data.addr);
3831 len = be32_to_cpu(fw_entry->data.len);
3832 file_offset = be32_to_cpu(fw_entry->data.offset);
3833 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3834
3835 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3836 if (len) {
3837 int j;
3838
3839 for (j = 0; j < (len / 4); j++, offset += 4)
3840 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3841 }
3842
3843 /* Load the Read-Only area. */
3844 addr = be32_to_cpu(fw_entry->rodata.addr);
3845 len = be32_to_cpu(fw_entry->rodata.len);
3846 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3847 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3848
3849 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3850 if (len) {
3851 int j;
3852
3853 for (j = 0; j < (len / 4); j++, offset += 4)
3854 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3855 }
3856
3857 /* Clear the pre-fetch instruction. */
3858 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3859
3860 val = be32_to_cpu(fw_entry->start_addr);
3861 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3862
3863 /* Start the CPU. */
3864 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3865 val &= ~cpu_reg->mode_value_halt;
3866 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3867 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3868
3869 return 0;
3870 }
3871
3872 static int
3873 bnx2_init_cpus(struct bnx2 *bp)
3874 {
3875 const struct bnx2_mips_fw_file *mips_fw =
3876 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3877 const struct bnx2_rv2p_fw_file *rv2p_fw =
3878 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3879 int rc;
3880
3881 /* Initialize the RV2P processor. */
3882 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3883 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3884
3885 /* Initialize the RX Processor. */
3886 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3887 if (rc)
3888 goto init_cpu_err;
3889
3890 /* Initialize the TX Processor. */
3891 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3892 if (rc)
3893 goto init_cpu_err;
3894
3895 /* Initialize the TX Patch-up Processor. */
3896 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3897 if (rc)
3898 goto init_cpu_err;
3899
3900 /* Initialize the Completion Processor. */
3901 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3902 if (rc)
3903 goto init_cpu_err;
3904
3905 /* Initialize the Command Processor. */
3906 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3907
3908 init_cpu_err:
3909 return rc;
3910 }
3911
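/* Annotation (not in the original source): for D3hot with WOL the
 * driver re-negotiates the copper PHY down to 10/100, programs the
 * MAC for magic/ACPI packet reception, opens the multicast filters,
 * tells the bootcode via bnx2_fw_sync(), and only then writes PMCSR
 * to drop the device into D3.
 */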
3912 static int
3913 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3914 {
3915 u16 pmcsr;
3916
3917 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3918
3919 switch (state) {
3920 case PCI_D0: {
3921 u32 val;
3922
3923 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3924 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3925 PCI_PM_CTRL_PME_STATUS);
3926
3927 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3928 /* delay required during transition out of D3hot */
3929 msleep(20);
3930
3931 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3932 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3933 val &= ~BNX2_EMAC_MODE_MPKT;
3934 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3935
3936 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3937 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3938 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3939 break;
3940 }
3941 case PCI_D3hot: {
3942 int i;
3943 u32 val, wol_msg;
3944
3945 if (bp->wol) {
3946 u32 advertising;
3947 u8 autoneg;
3948
3949 autoneg = bp->autoneg;
3950 advertising = bp->advertising;
3951
3952 if (bp->phy_port == PORT_TP) {
3953 bp->autoneg = AUTONEG_SPEED;
3954 bp->advertising = ADVERTISED_10baseT_Half |
3955 ADVERTISED_10baseT_Full |
3956 ADVERTISED_100baseT_Half |
3957 ADVERTISED_100baseT_Full |
3958 ADVERTISED_Autoneg;
3959 }
3960
3961 spin_lock_bh(&bp->phy_lock);
3962 bnx2_setup_phy(bp, bp->phy_port);
3963 spin_unlock_bh(&bp->phy_lock);
3964
3965 bp->autoneg = autoneg;
3966 bp->advertising = advertising;
3967
3968 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3969
3970 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3971
3972 /* Enable port mode. */
3973 val &= ~BNX2_EMAC_MODE_PORT;
3974 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3975 BNX2_EMAC_MODE_ACPI_RCVD |
3976 BNX2_EMAC_MODE_MPKT;
3977 if (bp->phy_port == PORT_TP)
3978 val |= BNX2_EMAC_MODE_PORT_MII;
3979 else {
3980 val |= BNX2_EMAC_MODE_PORT_GMII;
3981 if (bp->line_speed == SPEED_2500)
3982 val |= BNX2_EMAC_MODE_25G_MODE;
3983 }
3984
3985 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3986
3987 /* receive all multicast */
3988 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3989 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3990 0xffffffff);
3991 }
3992 BNX2_WR(bp, BNX2_EMAC_RX_MODE,
3993 BNX2_EMAC_RX_MODE_SORT_MODE);
3994
3995 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3996 BNX2_RPM_SORT_USER0_MC_EN;
3997 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3998 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3999 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
4000 BNX2_RPM_SORT_USER0_ENA);
4001
4002 /* Need to enable EMAC and RPM for WOL. */
4003 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4004 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4005 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4006 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4007
4008 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4009 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4010 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4011
4012 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4013 }
4014 else {
4015 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4016 }
4017
4018 if (!(bp->flags & BNX2_FLAG_NO_WOL))
4019 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4020 1, 0);
4021
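		/* A power state field value of 3 selects D3hot.  5706 A0/A1
		 * parts are only moved to D3hot when WOL is enabled and are
		 * otherwise left in D0.
		 */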
4022 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4023 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4024 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4025
4026 if (bp->wol)
4027 pmcsr |= 3;
4028 }
4029 else {
4030 pmcsr |= 3;
4031 }
4032 if (bp->wol) {
4033 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4034 }
4035 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4036 pmcsr);
4037
4038 /* No more memory access after this point until
3939 		 * the device is brought back to D0.
4040 */
4041 udelay(50);
4042 break;
4043 }
4044 default:
4045 return -EINVAL;
4046 }
4047 return 0;
4048 }
4049
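/* Claim the shared NVRAM arbiter: set the request bit for arbiter 2,
 * then poll for the matching grant bit, giving up after
 * NVRAM_TIMEOUT_COUNT polls of 5 usec each.
 */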
4050 static int
4051 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4052 {
4053 u32 val;
4054 int j;
4055
4056 /* Request access to the flash interface. */
4057 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4058 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4059 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4060 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4061 break;
4062
4063 udelay(5);
4064 }
4065
4066 if (j >= NVRAM_TIMEOUT_COUNT)
4067 return -EBUSY;
4068
4069 return 0;
4070 }
4071
4072 static int
4073 bnx2_release_nvram_lock(struct bnx2 *bp)
4074 {
4075 int j;
4076 u32 val;
4077
4078 /* Relinquish nvram interface. */
4079 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4080
4081 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4082 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4083 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4084 break;
4085
4086 udelay(5);
4087 }
4088
4089 if (j >= NVRAM_TIMEOUT_COUNT)
4090 return -EBUSY;
4091
4092 return 0;
4093 }
4094
4095
4096 static int
4097 bnx2_enable_nvram_write(struct bnx2 *bp)
4098 {
4099 u32 val;
4100
4101 val = BNX2_RD(bp, BNX2_MISC_CFG);
4102 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4103
4104 if (bp->flash_info->flags & BNX2_NV_WREN) {
4105 int j;
4106
4107 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4108 BNX2_WR(bp, BNX2_NVM_COMMAND,
4109 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4110
4111 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4112 udelay(5);
4113
4114 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4115 if (val & BNX2_NVM_COMMAND_DONE)
4116 break;
4117 }
4118
4119 if (j >= NVRAM_TIMEOUT_COUNT)
4120 return -EBUSY;
4121 }
4122 return 0;
4123 }
4124
4125 static void
4126 bnx2_disable_nvram_write(struct bnx2 *bp)
4127 {
4128 u32 val;
4129
4130 val = BNX2_RD(bp, BNX2_MISC_CFG);
4131 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4132 }
4133
4134
4135 static void
4136 bnx2_enable_nvram_access(struct bnx2 *bp)
4137 {
4138 u32 val;
4139
4140 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4141 /* Enable both bits, even on read. */
4142 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4143 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4144 }
4145
4146 static void
4147 bnx2_disable_nvram_access(struct bnx2 *bp)
4148 {
4149 u32 val;
4150
4151 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4152 /* Disable both bits, even after read. */
4153 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4154 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4155 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4156 }
4157
4158 static int
4159 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4160 {
4161 u32 cmd;
4162 int j;
4163
4164 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4165 /* Buffered flash, no erase needed */
4166 return 0;
4167
4168 /* Build an erase command */
4169 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4170 BNX2_NVM_COMMAND_DOIT;
4171
4172 /* Need to clear DONE bit separately. */
4173 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4174
4175 	/* Address of the NVRAM page to erase. */
4176 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4177
4178 /* Issue an erase command. */
4179 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4180
4181 /* Wait for completion. */
4182 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4183 u32 val;
4184
4185 udelay(5);
4186
4187 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4188 if (val & BNX2_NVM_COMMAND_DONE)
4189 break;
4190 }
4191
4192 if (j >= NVRAM_TIMEOUT_COUNT)
4193 return -EBUSY;
4194
4195 return 0;
4196 }
4197
4198 static int
4199 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4200 {
4201 u32 cmd;
4202 int j;
4203
4204 /* Build the command word. */
4205 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4206
4207 	/* Translate the offset for buffered flash; not needed for the 5709. */
4208 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4209 offset = ((offset / bp->flash_info->page_size) <<
4210 bp->flash_info->page_bits) +
4211 (offset % bp->flash_info->page_size);
4212 }
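	/* For example, with a hypothetical 264-byte page and 9 page bits,
	 * offset 1000 becomes ((1000 / 264) << 9) + (1000 % 264) =
	 * (3 << 9) + 208 = 1744.
	 */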
4213
4214 /* Need to clear DONE bit separately. */
4215 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4216
4217 /* Address of the NVRAM to read from. */
4218 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4219
4220 /* Issue a read command. */
4221 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4222
4223 /* Wait for completion. */
4224 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4225 u32 val;
4226
4227 udelay(5);
4228
4229 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4230 if (val & BNX2_NVM_COMMAND_DONE) {
4231 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4232 memcpy(ret_val, &v, 4);
4233 break;
4234 }
4235 }
4236 if (j >= NVRAM_TIMEOUT_COUNT)
4237 return -EBUSY;
4238
4239 return 0;
4240 }
4241
4242
4243 static int
4244 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4245 {
4246 u32 cmd;
4247 __be32 val32;
4248 int j;
4249
4250 /* Build the command word. */
4251 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4252
4253 	/* Translate the offset for buffered flash; not needed for the 5709. */
4254 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4255 offset = ((offset / bp->flash_info->page_size) <<
4256 bp->flash_info->page_bits) +
4257 (offset % bp->flash_info->page_size);
4258 }
4259
4260 /* Need to clear DONE bit separately. */
4261 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4262
4263 memcpy(&val32, val, 4);
4264
4265 /* Write the data. */
4266 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4267
4268 /* Address of the NVRAM to write to. */
4269 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4270
4271 /* Issue the write command. */
4272 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4273
4274 /* Wait for completion. */
4275 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4276 udelay(5);
4277
4278 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4279 break;
4280 }
4281 if (j >= NVRAM_TIMEOUT_COUNT)
4282 return -EBUSY;
4283
4284 return 0;
4285 }
4286
4287 static int
4288 bnx2_init_nvram(struct bnx2 *bp)
4289 {
4290 u32 val;
4291 int j, entry_count, rc = 0;
4292 const struct flash_spec *flash;
4293
4294 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4295 bp->flash_info = &flash_5709;
4296 goto get_flash_size;
4297 }
4298
4299 /* Determine the selected interface. */
4300 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4301
4302 entry_count = ARRAY_SIZE(flash_table);
4303
4304 if (val & 0x40000000) {
4305
4306 /* Flash interface has been reconfigured */
4307 for (j = 0, flash = &flash_table[0]; j < entry_count;
4308 j++, flash++) {
4309 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4310 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4311 bp->flash_info = flash;
4312 break;
4313 }
4314 }
4315 }
4316 else {
4317 u32 mask;
4318 		/* Not yet reconfigured */
4319
4320 if (val & (1 << 23))
4321 mask = FLASH_BACKUP_STRAP_MASK;
4322 else
4323 mask = FLASH_STRAP_MASK;
4324
4325 for (j = 0, flash = &flash_table[0]; j < entry_count;
4326 j++, flash++) {
4327
4328 if ((val & mask) == (flash->strapping & mask)) {
4329 bp->flash_info = flash;
4330
4331 /* Request access to the flash interface. */
4332 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4333 return rc;
4334
4335 /* Enable access to flash interface */
4336 bnx2_enable_nvram_access(bp);
4337
4338 /* Reconfigure the flash interface */
4339 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4340 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4341 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4342 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4343
4344 /* Disable access to flash interface */
4345 bnx2_disable_nvram_access(bp);
4346 bnx2_release_nvram_lock(bp);
4347
4348 break;
4349 }
4350 }
4351 } /* if (val & 0x40000000) */
4352
4353 if (j == entry_count) {
4354 bp->flash_info = NULL;
4355 pr_alert("Unknown flash/EEPROM type\n");
4356 return -ENODEV;
4357 }
4358
4359 get_flash_size:
4360 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4361 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4362 if (val)
4363 bp->flash_size = val;
4364 else
4365 bp->flash_size = bp->flash_info->total_size;
4366
4367 return rc;
4368 }
4369
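/* Read an arbitrary byte range from NVRAM.  The hardware transfers
 * aligned dwords only, so an unaligned head and a short tail are each
 * read through a 4-byte bounce buffer and the requested bytes copied
 * out.
 */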
4370 static int
4371 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4372 int buf_size)
4373 {
4374 int rc = 0;
4375 u32 cmd_flags, offset32, len32, extra;
4376
4377 if (buf_size == 0)
4378 return 0;
4379
4380 /* Request access to the flash interface. */
4381 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4382 return rc;
4383
4384 /* Enable access to flash interface */
4385 bnx2_enable_nvram_access(bp);
4386
4387 len32 = buf_size;
4388 offset32 = offset;
4389 extra = 0;
4390
4391 cmd_flags = 0;
4392
4393 if (offset32 & 3) {
4394 u8 buf[4];
4395 u32 pre_len;
4396
4397 offset32 &= ~3;
4398 pre_len = 4 - (offset & 3);
4399
4400 if (pre_len >= len32) {
4401 pre_len = len32;
4402 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4403 BNX2_NVM_COMMAND_LAST;
4404 }
4405 else {
4406 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4407 }
4408
4409 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4410
4411 if (rc)
4412 return rc;
4413
4414 memcpy(ret_buf, buf + (offset & 3), pre_len);
4415
4416 offset32 += 4;
4417 ret_buf += pre_len;
4418 len32 -= pre_len;
4419 }
4420 if (len32 & 3) {
4421 extra = 4 - (len32 & 3);
4422 len32 = (len32 + 4) & ~3;
4423 }
4424
4425 if (len32 == 4) {
4426 u8 buf[4];
4427
4428 if (cmd_flags)
4429 cmd_flags = BNX2_NVM_COMMAND_LAST;
4430 else
4431 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4432 BNX2_NVM_COMMAND_LAST;
4433
4434 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4435
4436 memcpy(ret_buf, buf, 4 - extra);
4437 }
4438 else if (len32 > 0) {
4439 u8 buf[4];
4440
4441 /* Read the first word. */
4442 if (cmd_flags)
4443 cmd_flags = 0;
4444 else
4445 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4446
4447 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4448
4449 /* Advance to the next dword. */
4450 offset32 += 4;
4451 ret_buf += 4;
4452 len32 -= 4;
4453
4454 while (len32 > 4 && rc == 0) {
4455 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4456
4457 /* Advance to the next dword. */
4458 offset32 += 4;
4459 ret_buf += 4;
4460 len32 -= 4;
4461 }
4462
4463 if (rc)
4464 return rc;
4465
4466 cmd_flags = BNX2_NVM_COMMAND_LAST;
4467 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4468
4469 memcpy(ret_buf, buf, 4 - extra);
4470 }
4471
4472 /* Disable access to flash interface */
4473 bnx2_disable_nvram_access(bp);
4474
4475 bnx2_release_nvram_lock(bp);
4476
4477 return rc;
4478 }
4479
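/* Write an arbitrary byte range to NVRAM.  Unaligned edges are merged
 * with existing data first, and on non-buffered flash each affected
 * page is read into a scratch buffer, erased, and rewritten in full.
 */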
4480 static int
4481 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4482 int buf_size)
4483 {
4484 u32 written, offset32, len32;
4485 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4486 int rc = 0;
4487 int align_start, align_end;
4488
4489 buf = data_buf;
4490 offset32 = offset;
4491 len32 = buf_size;
4492 align_start = align_end = 0;
4493
4494 if ((align_start = (offset32 & 3))) {
4495 offset32 &= ~3;
4496 len32 += align_start;
4497 if (len32 < 4)
4498 len32 = 4;
4499 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4500 return rc;
4501 }
4502
4503 if (len32 & 3) {
4504 align_end = 4 - (len32 & 3);
4505 len32 += align_end;
4506 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4507 return rc;
4508 }
4509
4510 if (align_start || align_end) {
4511 align_buf = kmalloc(len32, GFP_KERNEL);
4512 if (align_buf == NULL)
4513 return -ENOMEM;
4514 if (align_start) {
4515 memcpy(align_buf, start, 4);
4516 }
4517 if (align_end) {
4518 memcpy(align_buf + len32 - 4, end, 4);
4519 }
4520 memcpy(align_buf + align_start, data_buf, buf_size);
4521 buf = align_buf;
4522 }
4523
4524 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4525 flash_buffer = kmalloc(264, GFP_KERNEL);
4526 if (flash_buffer == NULL) {
4527 rc = -ENOMEM;
4528 goto nvram_write_end;
4529 }
4530 }
4531
4532 written = 0;
4533 while ((written < len32) && (rc == 0)) {
4534 u32 page_start, page_end, data_start, data_end;
4535 u32 addr, cmd_flags;
4536 int i;
4537
4538 /* Find the page_start addr */
4539 page_start = offset32 + written;
4540 page_start -= (page_start % bp->flash_info->page_size);
4541 /* Find the page_end addr */
4542 page_end = page_start + bp->flash_info->page_size;
4543 /* Find the data_start addr */
4544 data_start = (written == 0) ? offset32 : page_start;
4545 /* Find the data_end addr */
4546 data_end = (page_end > offset32 + len32) ?
4547 (offset32 + len32) : page_end;
4548
4549 /* Request access to the flash interface. */
4550 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4551 goto nvram_write_end;
4552
4553 /* Enable access to flash interface */
4554 bnx2_enable_nvram_access(bp);
4555
4556 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4557 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4558 int j;
4559
4560 /* Read the whole page into the buffer
4561 			 * (non-buffered flash only) */
4562 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4563 if (j == (bp->flash_info->page_size - 4)) {
4564 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4565 }
4566 rc = bnx2_nvram_read_dword(bp,
4567 page_start + j,
4568 &flash_buffer[j],
4569 cmd_flags);
4570
4571 if (rc)
4572 goto nvram_write_end;
4573
4574 cmd_flags = 0;
4575 }
4576 }
4577
4578 /* Enable writes to flash interface (unlock write-protect) */
4579 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4580 goto nvram_write_end;
4581
4582 /* Loop to write back the buffer data from page_start to
4583 * data_start */
4584 i = 0;
4585 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4586 /* Erase the page */
4587 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4588 goto nvram_write_end;
4589
4590 			/* Re-enable writes for the actual write */
4591 bnx2_enable_nvram_write(bp);
4592
4593 for (addr = page_start; addr < data_start;
4594 addr += 4, i += 4) {
4595
4596 rc = bnx2_nvram_write_dword(bp, addr,
4597 &flash_buffer[i], cmd_flags);
4598
4599 if (rc != 0)
4600 goto nvram_write_end;
4601
4602 cmd_flags = 0;
4603 }
4604 }
4605
4606 /* Loop to write the new data from data_start to data_end */
4607 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4608 if ((addr == page_end - 4) ||
4609 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4610 (addr == data_end - 4))) {
4611
4612 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4613 }
4614 rc = bnx2_nvram_write_dword(bp, addr, buf,
4615 cmd_flags);
4616
4617 if (rc != 0)
4618 goto nvram_write_end;
4619
4620 cmd_flags = 0;
4621 buf += 4;
4622 }
4623
4624 /* Loop to write back the buffer data from data_end
4625 * to page_end */
4626 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4627 for (addr = data_end; addr < page_end;
4628 addr += 4, i += 4) {
4629
4630 if (addr == page_end-4) {
4631 cmd_flags = BNX2_NVM_COMMAND_LAST;
4632 }
4633 rc = bnx2_nvram_write_dword(bp, addr,
4634 &flash_buffer[i], cmd_flags);
4635
4636 if (rc != 0)
4637 goto nvram_write_end;
4638
4639 cmd_flags = 0;
4640 }
4641 }
4642
4643 /* Disable writes to flash interface (lock write-protect) */
4644 bnx2_disable_nvram_write(bp);
4645
4646 /* Disable access to flash interface */
4647 bnx2_disable_nvram_access(bp);
4648 bnx2_release_nvram_lock(bp);
4649
4650 		/* Account for the bytes written in this page */
4651 written += data_end - data_start;
4652 }
4653
4654 nvram_write_end:
4655 kfree(flash_buffer);
4656 kfree(align_buf);
4657 return rc;
4658 }
4659
4660 static void
4661 bnx2_init_fw_cap(struct bnx2 *bp)
4662 {
4663 u32 val, sig = 0;
4664
4665 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4666 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4667
4668 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4669 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4670
4671 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4672 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4673 return;
4674
4675 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4676 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4677 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4678 }
4679
4680 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4681 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4682 u32 link;
4683
4684 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4685
4686 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4687 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4688 bp->phy_port = PORT_FIBRE;
4689 else
4690 bp->phy_port = PORT_TP;
4691
4692 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4693 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4694 }
4695
4696 if (netif_running(bp->dev) && sig)
4697 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4698 }
4699
4700 static void
4701 bnx2_setup_msix_tbl(struct bnx2 *bp)
4702 {
4703 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4704
4705 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4706 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4707 }
4708
4709 static int
4710 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4711 {
4712 u32 val;
4713 int i, rc = 0;
4714 u8 old_port;
4715
4716 /* Wait for the current PCI transaction to complete before
4717 * issuing a reset. */
4718 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4719 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4720 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4721 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4722 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4723 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4724 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4725 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4726 udelay(5);
4727 } else { /* 5709 */
4728 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4729 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4730 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4731 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4732
4733 for (i = 0; i < 100; i++) {
4734 msleep(1);
4735 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4736 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4737 break;
4738 }
4739 }
4740
4741 /* Wait for the firmware to tell us it is ok to issue a reset. */
4742 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4743
4744 /* Deposit a driver reset signature so the firmware knows that
4745 * this is a soft reset. */
4746 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4747 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4748
4749 	/* Do a dummy read to force the chip to complete all current transactions
4750 * before we issue a reset. */
4751 val = BNX2_RD(bp, BNX2_MISC_ID);
4752
4753 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4754 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4755 BNX2_RD(bp, BNX2_MISC_COMMAND);
4756 udelay(5);
4757
4758 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4759 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4760
4761 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4762
4763 } else {
4764 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4765 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4766 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4767
4768 /* Chip reset. */
4769 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4770
4771 /* Reading back any register after chip reset will hang the
4772 * bus on 5706 A0 and A1. The msleep below provides plenty
4773 * of margin for write posting.
4774 */
4775 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4776 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4777 msleep(20);
4778
4779 		/* Reset takes approximately 30 usec */
4780 for (i = 0; i < 10; i++) {
4781 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4782 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4783 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4784 break;
4785 udelay(10);
4786 }
4787
4788 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4789 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4790 pr_err("Chip reset did not complete\n");
4791 return -EBUSY;
4792 }
4793 }
4794
4795 /* Make sure byte swapping is properly configured. */
4796 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4797 if (val != 0x01020304) {
4798 pr_err("Chip not in correct endian mode\n");
4799 return -ENODEV;
4800 }
4801
4802 /* Wait for the firmware to finish its initialization. */
4803 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4804 if (rc)
4805 return rc;
4806
4807 spin_lock_bh(&bp->phy_lock);
4808 old_port = bp->phy_port;
4809 bnx2_init_fw_cap(bp);
4810 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4811 old_port != bp->phy_port)
4812 bnx2_set_default_remote_link(bp);
4813 spin_unlock_bh(&bp->phy_lock);
4814
4815 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4816 		/* Adjust the voltage regulator two steps lower. The default
4817 * of this register is 0x0000000e. */
4818 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4819
4820 /* Remove bad rbuf memory from the free pool. */
4821 rc = bnx2_alloc_bad_rbuf(bp);
4822 }
4823
4824 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4825 bnx2_setup_msix_tbl(bp);
4826 		/* Prevent MSIX table reads and writes from timing out */
4827 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4828 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4829 }
4830
4831 return rc;
4832 }
4833
4834 static int
4835 bnx2_init_chip(struct bnx2 *bp)
4836 {
4837 u32 val, mtu;
4838 int rc, i;
4839
4840 /* Make sure the interrupt is not active. */
4841 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4842
4843 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4844 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4845 #ifdef __BIG_ENDIAN
4846 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4847 #endif
4848 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4849 DMA_READ_CHANS << 12 |
4850 DMA_WRITE_CHANS << 16;
4851
4852 val |= (0x2 << 20) | (1 << 11);
4853
4854 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4855 val |= (1 << 23);
4856
4857 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4858 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4859 !(bp->flags & BNX2_FLAG_PCIX))
4860 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4861
4862 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4863
4864 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4865 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4866 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4867 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4868 }
4869
4870 if (bp->flags & BNX2_FLAG_PCIX) {
4871 u16 val16;
4872
4873 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4874 &val16);
4875 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4876 val16 & ~PCI_X_CMD_ERO);
4877 }
4878
4879 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4880 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4881 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4882 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4883
4884 /* Initialize context mapping and zero out the quick contexts. The
4885 * context block must have already been enabled. */
4886 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4887 rc = bnx2_init_5709_context(bp);
4888 if (rc)
4889 return rc;
4890 } else
4891 bnx2_init_context(bp);
4892
4893 if ((rc = bnx2_init_cpus(bp)) != 0)
4894 return rc;
4895
4896 bnx2_init_nvram(bp);
4897
4898 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4899
4900 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4901 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4902 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4903 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4904 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4905 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4906 val |= BNX2_MQ_CONFIG_HALT_DIS;
4907 }
4908
4909 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4910
4911 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4912 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4913 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4914
4915 val = (BNX2_PAGE_BITS - 8) << 24;
4916 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4917
4918 /* Configure page size. */
4919 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4920 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4921 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4922 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4923
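	/* Seed the EMAC backoff timer from the MAC address: bytes 0-2
	 * and bytes 3-5 are each packed low-byte-first into 24 bits and
	 * the two values are summed.
	 */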
4924 val = bp->mac_addr[0] +
4925 (bp->mac_addr[1] << 8) +
4926 (bp->mac_addr[2] << 16) +
4927 bp->mac_addr[3] +
4928 (bp->mac_addr[4] << 8) +
4929 (bp->mac_addr[5] << 16);
4930 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4931
4932 	/* Program the MTU.  Also include 4 bytes for the frame CRC (FCS). */
4933 mtu = bp->dev->mtu;
4934 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4935 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4936 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4937 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4938
4939 if (mtu < 1500)
4940 mtu = 1500;
4941
4942 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4943 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4944 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4945
4946 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4947 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4948 bp->bnx2_napi[i].last_status_idx = 0;
4949
4950 bp->idle_chk_status_idx = 0xffff;
4951
4952 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4953
4954 /* Set up how to generate a link change interrupt. */
4955 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4956
4957 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4958 (u64) bp->status_blk_mapping & 0xffffffff);
4959 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4960
4961 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4962 (u64) bp->stats_blk_mapping & 0xffffffff);
4963 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4964 (u64) bp->stats_blk_mapping >> 32);
4965
4966 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4967 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4968
4969 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4970 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4971
4972 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4973 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4974
4975 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4976
4977 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4978
4979 BNX2_WR(bp, BNX2_HC_COM_TICKS,
4980 (bp->com_ticks_int << 16) | bp->com_ticks);
4981
4982 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4983 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4984
4985 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4986 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4987 else
4988 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4989 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4990
4991 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4992 val = BNX2_HC_CONFIG_COLLECT_STATS;
4993 else {
4994 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4995 BNX2_HC_CONFIG_COLLECT_STATS;
4996 }
4997
4998 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4999 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5000 BNX2_HC_MSIX_BIT_VECTOR_VAL);
5001
5002 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5003 }
5004
5005 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5006 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5007
5008 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5009
5010 if (bp->rx_ticks < 25)
5011 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5012 else
5013 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5014
5015 for (i = 1; i < bp->irq_nvecs; i++) {
5016 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5017 BNX2_HC_SB_CONFIG_1;
5018
5019 BNX2_WR(bp, base,
5020 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5021 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5022 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5023
5024 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5025 (bp->tx_quick_cons_trip_int << 16) |
5026 bp->tx_quick_cons_trip);
5027
5028 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5029 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5030
5031 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5032 (bp->rx_quick_cons_trip_int << 16) |
5033 bp->rx_quick_cons_trip);
5034
5035 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5036 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5037 }
5038
5039 /* Clear internal stats counters. */
5040 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5041
5042 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5043
5044 /* Initialize the receive filter. */
5045 bnx2_set_rx_mode(bp->dev);
5046
5047 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5048 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5049 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5050 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5051 }
5052 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5053 1, 0);
5054
5055 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5056 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5057
5058 udelay(20);
5059
5060 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5061
5062 return rc;
5063 }
5064
5065 static void
5066 bnx2_clear_ring_states(struct bnx2 *bp)
5067 {
5068 struct bnx2_napi *bnapi;
5069 struct bnx2_tx_ring_info *txr;
5070 struct bnx2_rx_ring_info *rxr;
5071 int i;
5072
5073 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5074 bnapi = &bp->bnx2_napi[i];
5075 txr = &bnapi->tx_ring;
5076 rxr = &bnapi->rx_ring;
5077
5078 txr->tx_cons = 0;
5079 txr->hw_tx_cons = 0;
5080 rxr->rx_prod_bseq = 0;
5081 rxr->rx_prod = 0;
5082 rxr->rx_cons = 0;
5083 rxr->rx_pg_prod = 0;
5084 rxr->rx_pg_cons = 0;
5085 }
5086 }
5087
5088 static void
5089 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5090 {
5091 u32 val, offset0, offset1, offset2, offset3;
5092 u32 cid_addr = GET_CID_ADDR(cid);
5093
5094 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5095 offset0 = BNX2_L2CTX_TYPE_XI;
5096 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5097 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5098 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5099 } else {
5100 offset0 = BNX2_L2CTX_TYPE;
5101 offset1 = BNX2_L2CTX_CMD_TYPE;
5102 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5103 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5104 }
5105 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5106 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5107
5108 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5109 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5110
5111 val = (u64) txr->tx_desc_mapping >> 32;
5112 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5113
5114 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5115 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5116 }
5117
5118 static void
5119 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5120 {
5121 struct bnx2_tx_bd *txbd;
5122 u32 cid = TX_CID;
5123 struct bnx2_napi *bnapi;
5124 struct bnx2_tx_ring_info *txr;
5125
5126 bnapi = &bp->bnx2_napi[ring_num];
5127 txr = &bnapi->tx_ring;
5128
5129 if (ring_num == 0)
5130 cid = TX_CID;
5131 else
5132 cid = TX_TSS_CID + ring_num - 1;
5133
5134 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5135
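	/* The last descriptor entry links back to the DMA address of the
	 * ring itself, making the TX BD page a circular chain.
	 */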
5136 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5137
5138 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5139 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5140
5141 txr->tx_prod = 0;
5142 txr->tx_prod_bseq = 0;
5143
5144 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5145 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5146
5147 bnx2_init_tx_context(bp, cid, txr);
5148 }
5149
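/* Initialize one or more pages of RX buffer descriptors and chain them
 * into a ring: the final descriptor of each page holds the DMA address
 * of the next page, and the last page points back to the first.
 */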
5150 static void
5151 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5152 u32 buf_size, int num_rings)
5153 {
5154 int i;
5155 struct bnx2_rx_bd *rxbd;
5156
5157 for (i = 0; i < num_rings; i++) {
5158 int j;
5159
5160 rxbd = &rx_ring[i][0];
5161 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5162 rxbd->rx_bd_len = buf_size;
5163 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5164 }
5165 if (i == (num_rings - 1))
5166 j = 0;
5167 else
5168 j = i + 1;
5169 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5170 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5171 }
5172 }
5173
5174 static void
5175 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5176 {
5177 int i;
5178 u16 prod, ring_prod;
5179 u32 cid, rx_cid_addr, val;
5180 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5181 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5182
5183 if (ring_num == 0)
5184 cid = RX_CID;
5185 else
5186 cid = RX_RSS_CID + ring_num - 1;
5187
5188 rx_cid_addr = GET_CID_ADDR(cid);
5189
5190 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5191 bp->rx_buf_use_size, bp->rx_max_ring);
5192
5193 bnx2_init_rx_context(bp, cid);
5194
5195 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5196 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5197 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5198 }
5199
5200 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5201 if (bp->rx_pg_ring_size) {
5202 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5203 rxr->rx_pg_desc_mapping,
5204 PAGE_SIZE, bp->rx_max_pg_ring);
5205 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5206 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5207 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5208 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5209
5210 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5211 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5212
5213 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5214 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5215
5216 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5217 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5218 }
5219
5220 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5221 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5222
5223 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5224 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5225
5226 ring_prod = prod = rxr->rx_pg_prod;
5227 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5228 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5229 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5230 ring_num, i, bp->rx_pg_ring_size);
5231 break;
5232 }
5233 prod = BNX2_NEXT_RX_BD(prod);
5234 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5235 }
5236 rxr->rx_pg_prod = prod;
5237
5238 ring_prod = prod = rxr->rx_prod;
5239 for (i = 0; i < bp->rx_ring_size; i++) {
5240 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5241 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5242 ring_num, i, bp->rx_ring_size);
5243 break;
5244 }
5245 prod = BNX2_NEXT_RX_BD(prod);
5246 ring_prod = BNX2_RX_RING_IDX(prod);
5247 }
5248 rxr->rx_prod = prod;
5249
5250 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5251 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5252 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5253
5254 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5255 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5256
5257 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5258 }
5259
5260 static void
5261 bnx2_init_all_rings(struct bnx2 *bp)
5262 {
5263 int i;
5264 u32 val;
5265
5266 bnx2_clear_ring_states(bp);
5267
5268 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5269 for (i = 0; i < bp->num_tx_rings; i++)
5270 bnx2_init_tx_ring(bp, i);
5271
5272 if (bp->num_tx_rings > 1)
5273 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5274 (TX_TSS_CID << 7));
5275
5276 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5277 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5278
5279 for (i = 0; i < bp->num_rx_rings; i++)
5280 bnx2_init_rx_ring(bp, i);
5281
5282 if (bp->num_rx_rings > 1) {
5283 u32 tbl_32 = 0;
5284
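		/* Build the RSS indirection table.  Each entry is a 4-bit
		 * ring index, so eight entries are packed per 32-bit word
		 * and flushed to BNX2_RLUP_RSS_DATA on every 8th entry.
		 */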
5285 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5286 int shift = (i % 8) << 2;
5287
5288 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5289 if ((i % 8) == 7) {
5290 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5291 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5292 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5293 BNX2_RLUP_RSS_COMMAND_WRITE |
5294 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5295 tbl_32 = 0;
5296 }
5297 }
5298
5299 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5300 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5301
5302 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5303
5304 }
5305 }
5306
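/* Return the number of descriptor pages needed for ring_size entries,
 * rounded up to a power of 2 and capped at max_size.  For example,
 * assuming 255 usable descriptors per page, a request for 600
 * descriptors spans 3 pages, which rounds up to 4.
 */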
5307 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5308 {
5309 u32 max, num_rings = 1;
5310
5311 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5312 ring_size -= BNX2_MAX_RX_DESC_CNT;
5313 num_rings++;
5314 }
5315 	/* round num_rings up to the next power of 2, capped at max_size */
5316 max = max_size;
5317 while ((max & num_rings) == 0)
5318 max >>= 1;
5319
5320 if (num_rings != max)
5321 max <<= 1;
5322
5323 return max;
5324 }
5325
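/* Size the RX rings for the current MTU.  When a full frame no longer
 * fits in a single page-sized buffer (and jumbo placement works on
 * this chip), reception is split: small buffers hold the headers and
 * a separate page ring holds the remainder of each jumbo frame.
 */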
5326 static void
5327 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5328 {
5329 u32 rx_size, rx_space, jumbo_size;
5330
5331 /* 8 for CRC and VLAN */
5332 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5333
5334 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5335 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5336
5337 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5338 bp->rx_pg_ring_size = 0;
5339 bp->rx_max_pg_ring = 0;
5340 bp->rx_max_pg_ring_idx = 0;
5341 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5342 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5343
5344 jumbo_size = size * pages;
5345 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5346 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5347
5348 bp->rx_pg_ring_size = jumbo_size;
5349 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5350 BNX2_MAX_RX_PG_RINGS);
5351 bp->rx_max_pg_ring_idx =
5352 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5353 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5354 bp->rx_copy_thresh = 0;
5355 }
5356
5357 bp->rx_buf_use_size = rx_size;
5358 	/* hw alignment + build_skb() overhead */
5359 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5360 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5361 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5362 bp->rx_ring_size = size;
5363 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5364 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5365 }
5366
5367 static void
5368 bnx2_free_tx_skbs(struct bnx2 *bp)
5369 {
5370 int i;
5371
5372 for (i = 0; i < bp->num_tx_rings; i++) {
5373 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5374 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5375 int j;
5376
5377 if (txr->tx_buf_ring == NULL)
5378 continue;
5379
5380 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5381 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5382 struct sk_buff *skb = tx_buf->skb;
5383 int k, last;
5384
5385 if (skb == NULL) {
5386 j = BNX2_NEXT_TX_BD(j);
5387 continue;
5388 }
5389
5390 dma_unmap_single(&bp->pdev->dev,
5391 dma_unmap_addr(tx_buf, mapping),
5392 skb_headlen(skb),
5393 PCI_DMA_TODEVICE);
5394
5395 tx_buf->skb = NULL;
5396
5397 last = tx_buf->nr_frags;
5398 j = BNX2_NEXT_TX_BD(j);
5399 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5400 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5401 dma_unmap_page(&bp->pdev->dev,
5402 dma_unmap_addr(tx_buf, mapping),
5403 skb_frag_size(&skb_shinfo(skb)->frags[k]),
5404 PCI_DMA_TODEVICE);
5405 }
5406 dev_kfree_skb(skb);
5407 }
5408 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5409 }
5410 }
5411
5412 static void
5413 bnx2_free_rx_skbs(struct bnx2 *bp)
5414 {
5415 int i;
5416
5417 for (i = 0; i < bp->num_rx_rings; i++) {
5418 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5419 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5420 int j;
5421
5422 if (rxr->rx_buf_ring == NULL)
5423 return;
5424
5425 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5426 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5427 u8 *data = rx_buf->data;
5428
5429 if (data == NULL)
5430 continue;
5431
5432 dma_unmap_single(&bp->pdev->dev,
5433 dma_unmap_addr(rx_buf, mapping),
5434 bp->rx_buf_use_size,
5435 PCI_DMA_FROMDEVICE);
5436
5437 rx_buf->data = NULL;
5438
5439 kfree(data);
5440 }
5441 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5442 bnx2_free_rx_page(bp, rxr, j);
5443 }
5444 }
5445
5446 static void
5447 bnx2_free_skbs(struct bnx2 *bp)
5448 {
5449 bnx2_free_tx_skbs(bp);
5450 bnx2_free_rx_skbs(bp);
5451 }
5452
5453 static int
5454 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5455 {
5456 int rc;
5457
5458 rc = bnx2_reset_chip(bp, reset_code);
5459 bnx2_free_skbs(bp);
5460 if (rc)
5461 return rc;
5462
5463 if ((rc = bnx2_init_chip(bp)) != 0)
5464 return rc;
5465
5466 bnx2_init_all_rings(bp);
5467 return 0;
5468 }
5469
5470 static int
5471 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5472 {
5473 int rc;
5474
5475 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5476 return rc;
5477
5478 spin_lock_bh(&bp->phy_lock);
5479 bnx2_init_phy(bp, reset_phy);
5480 bnx2_set_link(bp);
5481 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5482 bnx2_remote_phy_event(bp);
5483 spin_unlock_bh(&bp->phy_lock);
5484 return 0;
5485 }
5486
5487 static int
5488 bnx2_shutdown_chip(struct bnx2 *bp)
5489 {
5490 u32 reset_code;
5491
5492 if (bp->flags & BNX2_FLAG_NO_WOL)
5493 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5494 else if (bp->wol)
5495 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5496 else
5497 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5498
5499 return bnx2_reset_chip(bp, reset_code);
5500 }
5501
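/* Walk a table of registers, checking that read/write bits (rw_mask)
 * can be cleared and set and that read-only bits (ro_mask) keep their
 * original value; the saved contents are restored after each check.
 */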
5502 static int
5503 bnx2_test_registers(struct bnx2 *bp)
5504 {
5505 int ret;
5506 int i, is_5709;
5507 static const struct {
5508 u16 offset;
5509 u16 flags;
5510 #define BNX2_FL_NOT_5709 1
5511 u32 rw_mask;
5512 u32 ro_mask;
5513 } reg_tbl[] = {
5514 { 0x006c, 0, 0x00000000, 0x0000003f },
5515 { 0x0090, 0, 0xffffffff, 0x00000000 },
5516 { 0x0094, 0, 0x00000000, 0x00000000 },
5517
5518 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5519 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5520 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5521 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5522 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5523 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5524 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5525 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5526 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5527
5528 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5529 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5530 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5531 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5532 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5533 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5534
5535 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5536 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5537 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5538
5539 { 0x1000, 0, 0x00000000, 0x00000001 },
5540 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5541
5542 { 0x1408, 0, 0x01c00800, 0x00000000 },
5543 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5544 { 0x14a8, 0, 0x00000000, 0x000001ff },
5545 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5546 { 0x14b0, 0, 0x00000002, 0x00000001 },
5547 { 0x14b8, 0, 0x00000000, 0x00000000 },
5548 { 0x14c0, 0, 0x00000000, 0x00000009 },
5549 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5550 { 0x14cc, 0, 0x00000000, 0x00000001 },
5551 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5552
5553 { 0x1800, 0, 0x00000000, 0x00000001 },
5554 { 0x1804, 0, 0x00000000, 0x00000003 },
5555
5556 { 0x2800, 0, 0x00000000, 0x00000001 },
5557 { 0x2804, 0, 0x00000000, 0x00003f01 },
5558 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5559 { 0x2810, 0, 0xffff0000, 0x00000000 },
5560 { 0x2814, 0, 0xffff0000, 0x00000000 },
5561 { 0x2818, 0, 0xffff0000, 0x00000000 },
5562 { 0x281c, 0, 0xffff0000, 0x00000000 },
5563 { 0x2834, 0, 0xffffffff, 0x00000000 },
5564 { 0x2840, 0, 0x00000000, 0xffffffff },
5565 { 0x2844, 0, 0x00000000, 0xffffffff },
5566 { 0x2848, 0, 0xffffffff, 0x00000000 },
5567 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5568
5569 { 0x2c00, 0, 0x00000000, 0x00000011 },
5570 { 0x2c04, 0, 0x00000000, 0x00030007 },
5571
5572 { 0x3c00, 0, 0x00000000, 0x00000001 },
5573 { 0x3c04, 0, 0x00000000, 0x00070000 },
5574 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5575 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5576 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5577 { 0x3c14, 0, 0x00000000, 0xffffffff },
5578 { 0x3c18, 0, 0x00000000, 0xffffffff },
5579 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5580 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5581
5582 { 0x5004, 0, 0x00000000, 0x0000007f },
5583 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5584
5585 { 0x5c00, 0, 0x00000000, 0x00000001 },
5586 { 0x5c04, 0, 0x00000000, 0x0003000f },
5587 { 0x5c08, 0, 0x00000003, 0x00000000 },
5588 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5589 { 0x5c10, 0, 0x00000000, 0xffffffff },
5590 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5591 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5592 { 0x5c88, 0, 0x00000000, 0x00077373 },
5593 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5594
5595 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5596 { 0x680c, 0, 0xffffffff, 0x00000000 },
5597 { 0x6810, 0, 0xffffffff, 0x00000000 },
5598 { 0x6814, 0, 0xffffffff, 0x00000000 },
5599 { 0x6818, 0, 0xffffffff, 0x00000000 },
5600 { 0x681c, 0, 0xffffffff, 0x00000000 },
5601 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5602 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5603 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5604 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5605 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5606 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5607 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5608 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5609 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5610 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5611 { 0x684c, 0, 0xffffffff, 0x00000000 },
5612 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5613 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5614 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5615 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5616 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5617 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5618
5619 { 0xffff, 0, 0x00000000, 0x00000000 },
5620 };
5621
5622 ret = 0;
5623 is_5709 = 0;
5624 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5625 is_5709 = 1;
5626
5627 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5628 u32 offset, rw_mask, ro_mask, save_val, val;
5629 u16 flags = reg_tbl[i].flags;
5630
5631 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5632 continue;
5633
5634 offset = (u32) reg_tbl[i].offset;
5635 rw_mask = reg_tbl[i].rw_mask;
5636 ro_mask = reg_tbl[i].ro_mask;
5637
5638 save_val = readl(bp->regview + offset);
5639
5640 writel(0, bp->regview + offset);
5641
5642 val = readl(bp->regview + offset);
5643 if ((val & rw_mask) != 0) {
5644 goto reg_test_err;
5645 }
5646
5647 if ((val & ro_mask) != (save_val & ro_mask)) {
5648 goto reg_test_err;
5649 }
5650
5651 writel(0xffffffff, bp->regview + offset);
5652
5653 val = readl(bp->regview + offset);
5654 if ((val & rw_mask) != rw_mask) {
5655 goto reg_test_err;
5656 }
5657
5658 if ((val & ro_mask) != (save_val & ro_mask)) {
5659 goto reg_test_err;
5660 }
5661
5662 writel(save_val, bp->regview + offset);
5663 continue;
5664
5665 reg_test_err:
5666 writel(save_val, bp->regview + offset);
5667 ret = -ENODEV;
5668 break;
5669 }
5670 return ret;
5671 }
5672
5673 static int
5674 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5675 {
5676 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5677 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5678 int i;
5679
5680 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5681 u32 offset;
5682
5683 for (offset = 0; offset < size; offset += 4) {
5684
5685 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5686
5687 if (bnx2_reg_rd_ind(bp, start + offset) !=
5688 test_pattern[i]) {
5689 return -ENODEV;
5690 }
5691 }
5692 }
5693 return 0;
5694 }
5695
5696 static int
5697 bnx2_test_memory(struct bnx2 *bp)
5698 {
5699 int ret = 0;
5700 int i;
5701 static struct mem_entry {
5702 u32 offset;
5703 u32 len;
5704 } mem_tbl_5706[] = {
5705 { 0x60000, 0x4000 },
5706 { 0xa0000, 0x3000 },
5707 { 0xe0000, 0x4000 },
5708 { 0x120000, 0x4000 },
5709 { 0x1a0000, 0x4000 },
5710 { 0x160000, 0x4000 },
5711 { 0xffffffff, 0 },
5712 },
5713 mem_tbl_5709[] = {
5714 { 0x60000, 0x4000 },
5715 { 0xa0000, 0x3000 },
5716 { 0xe0000, 0x4000 },
5717 { 0x120000, 0x4000 },
5718 { 0x1a0000, 0x4000 },
5719 { 0xffffffff, 0 },
5720 };
5721 struct mem_entry *mem_tbl;
5722
5723 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5724 mem_tbl = mem_tbl_5709;
5725 else
5726 mem_tbl = mem_tbl_5706;
5727
5728 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5729 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5730 mem_tbl[i].len)) != 0) {
5731 return ret;
5732 }
5733 }
5734
5735 return ret;
5736 }
5737
5738 #define BNX2_MAC_LOOPBACK 0
5739 #define BNX2_PHY_LOOPBACK 1
5740
5741 static int
5742 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5743 {
5744 unsigned int pkt_size, num_pkts, i;
5745 struct sk_buff *skb;
5746 u8 *data;
5747 unsigned char *packet;
5748 u16 rx_start_idx, rx_idx;
5749 dma_addr_t map;
5750 struct bnx2_tx_bd *txbd;
5751 struct bnx2_sw_bd *rx_buf;
5752 struct l2_fhdr *rx_hdr;
5753 int ret = -ENODEV;
5754 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5755 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5756 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5757
5758 tx_napi = bnapi;
5759
5760 txr = &tx_napi->tx_ring;
5761 rxr = &bnapi->rx_ring;
5762 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5763 bp->loopback = MAC_LOOPBACK;
5764 bnx2_set_mac_loopback(bp);
5765 }
5766 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5767 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5768 return 0;
5769
5770 bp->loopback = PHY_LOOPBACK;
5771 bnx2_set_phy_loopback(bp);
5772 }
5773 else
5774 return -EINVAL;
5775
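	/* Build a self-addressed test frame: the destination MAC is our
	 * own address and the payload is an incrementing byte pattern
	 * that is verified after loopback reception.
	 */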
5776 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5777 skb = netdev_alloc_skb(bp->dev, pkt_size);
5778 if (!skb)
5779 return -ENOMEM;
5780 packet = skb_put(skb, pkt_size);
5781 memcpy(packet, bp->dev->dev_addr, 6);
5782 memset(packet + 6, 0x0, 8);
5783 for (i = 14; i < pkt_size; i++)
5784 packet[i] = (unsigned char) (i & 0xff);
5785
5786 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5787 PCI_DMA_TODEVICE);
5788 if (dma_mapping_error(&bp->pdev->dev, map)) {
5789 dev_kfree_skb(skb);
5790 return -EIO;
5791 }
5792
5793 BNX2_WR(bp, BNX2_HC_COMMAND,
5794 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5795
5796 BNX2_RD(bp, BNX2_HC_COMMAND);
5797
5798 udelay(5);
5799 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5800
5801 num_pkts = 0;
5802
5803 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5804
5805 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5806 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5807 txbd->tx_bd_mss_nbytes = pkt_size;
5808 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5809
5810 num_pkts++;
5811 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5812 txr->tx_prod_bseq += pkt_size;
5813
5814 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5815 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5816
5817 udelay(100);
5818
5819 BNX2_WR(bp, BNX2_HC_COMMAND,
5820 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5821
5822 BNX2_RD(bp, BNX2_HC_COMMAND);
5823
5824 udelay(5);
5825
5826 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5827 dev_kfree_skb(skb);
5828
5829 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5830 goto loopback_test_done;
5831
5832 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5833 if (rx_idx != rx_start_idx + num_pkts) {
5834 goto loopback_test_done;
5835 }
5836
5837 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5838 data = rx_buf->data;
5839
5840 rx_hdr = get_l2_fhdr(data);
5841 data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5842
5843 dma_sync_single_for_cpu(&bp->pdev->dev,
5844 dma_unmap_addr(rx_buf, mapping),
5845 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5846
5847 if (rx_hdr->l2_fhdr_status &
5848 (L2_FHDR_ERRORS_BAD_CRC |
5849 L2_FHDR_ERRORS_PHY_DECODE |
5850 L2_FHDR_ERRORS_ALIGNMENT |
5851 L2_FHDR_ERRORS_TOO_SHORT |
5852 L2_FHDR_ERRORS_GIANT_FRAME)) {
5853
5854 goto loopback_test_done;
5855 }
5856
5857 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5858 goto loopback_test_done;
5859 }
5860
5861 for (i = 14; i < pkt_size; i++) {
5862 if (*(data + i) != (unsigned char) (i & 0xff)) {
5863 goto loopback_test_done;
5864 }
5865 }
5866
5867 ret = 0;
5868
5869 loopback_test_done:
5870 bp->loopback = 0;
5871 return ret;
5872 }
5873
5874 #define BNX2_MAC_LOOPBACK_FAILED 1
5875 #define BNX2_PHY_LOOPBACK_FAILED 2
5876 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5877 BNX2_PHY_LOOPBACK_FAILED)
5878
5879 static int
5880 bnx2_test_loopback(struct bnx2 *bp)
5881 {
5882 int rc = 0;
5883
5884 if (!netif_running(bp->dev))
5885 return BNX2_LOOPBACK_FAILED;
5886
5887 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5888 spin_lock_bh(&bp->phy_lock);
5889 bnx2_init_phy(bp, 1);
5890 spin_unlock_bh(&bp->phy_lock);
5891 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5892 rc |= BNX2_MAC_LOOPBACK_FAILED;
5893 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5894 rc |= BNX2_PHY_LOOPBACK_FAILED;
5895 return rc;
5896 }
5897
5898 #define NVRAM_SIZE 0x200
5899 #define CRC32_RESIDUAL 0xdebb20e3
5900
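/* A block that ends with the CRC32 of its preceding bytes yields a
 * fixed residual (0xdebb20e3 for the little-endian CRC used here) when
 * the CRC is run over the whole block, so each 0x100-byte region is
 * validated by comparing against CRC32_RESIDUAL.
 */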
5901 static int
5902 bnx2_test_nvram(struct bnx2 *bp)
5903 {
5904 __be32 buf[NVRAM_SIZE / 4];
5905 u8 *data = (u8 *) buf;
5906 int rc = 0;
5907 u32 magic, csum;
5908
5909 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5910 goto test_nvram_done;
5911
5912 magic = be32_to_cpu(buf[0]);
5913 if (magic != 0x669955aa) {
5914 rc = -ENODEV;
5915 goto test_nvram_done;
5916 }
5917
5918 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5919 goto test_nvram_done;
5920
5921 csum = ether_crc_le(0x100, data);
5922 if (csum != CRC32_RESIDUAL) {
5923 rc = -ENODEV;
5924 goto test_nvram_done;
5925 }
5926
5927 csum = ether_crc_le(0x100, data + 0x100);
5928 if (csum != CRC32_RESIDUAL) {
5929 rc = -ENODEV;
5930 }
5931
5932 test_nvram_done:
5933 return rc;
5934 }
5935
5936 static int
5937 bnx2_test_link(struct bnx2 *bp)
5938 {
5939 u32 bmsr;
5940
5941 if (!netif_running(bp->dev))
5942 return -ENODEV;
5943
5944 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5945 if (bp->link_up)
5946 return 0;
5947 return -ENODEV;
5948 }
5949 spin_lock_bh(&bp->phy_lock);
5950 bnx2_enable_bmsr1(bp);
5951 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5952 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5953 bnx2_disable_bmsr1(bp);
5954 spin_unlock_bh(&bp->phy_lock);
5955
5956 if (bmsr & BMSR_LSTATUS) {
5957 return 0;
5958 }
5959 return -ENODEV;
5960 }
5961
5962 static int
5963 bnx2_test_intr(struct bnx2 *bp)
5964 {
5965 int i;
5966 u16 status_idx;
5967
5968 if (!netif_running(bp->dev))
5969 return -ENODEV;
5970
5971 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5972
5973 /* This register is not touched during run-time. */
5974 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5975 BNX2_RD(bp, BNX2_HC_COMMAND);
5976
5977 for (i = 0; i < 10; i++) {
5978 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5979 status_idx) {
5980
5981 break;
5982 }
5983
5984 msleep_interruptible(10);
5985 }
5986 if (i < 10)
5987 return 0;
5988
5989 return -ENODEV;
5990 }
5991
5992 /* Determine link for parallel detection. */
5993 static int
5994 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5995 {
5996 u32 mode_ctl, an_dbg, exp;
5997
5998 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5999 return 0;
6000
6001 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6002 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6003
6004 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6005 return 0;
6006
6007 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6008 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6009 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6010
6011 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6012 return 0;
6013
6014 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6015 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6016 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6017
6018 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
6019 return 0;
6020
6021 return 1;
6022 }
6023
6024 static void
6025 bnx2_5706_serdes_timer(struct bnx2 *bp)
6026 {
6027 int check_link = 1;
6028
6029 spin_lock(&bp->phy_lock);
6030 if (bp->serdes_an_pending) {
6031 bp->serdes_an_pending--;
6032 check_link = 0;
6033 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6034 u32 bmcr;
6035
6036 bp->current_interval = BNX2_TIMER_INTERVAL;
6037
6038 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6039
6040 if (bmcr & BMCR_ANENABLE) {
6041 if (bnx2_5706_serdes_has_link(bp)) {
6042 bmcr &= ~BMCR_ANENABLE;
6043 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6044 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6045 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6046 }
6047 }
6048 }
6049 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6050 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6051 u32 phy2;
6052
6053 bnx2_write_phy(bp, 0x17, 0x0f01);
6054 bnx2_read_phy(bp, 0x15, &phy2);
6055 if (phy2 & 0x20) {
6056 u32 bmcr;
6057
6058 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6059 bmcr |= BMCR_ANENABLE;
6060 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6061
6062 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6063 }
6064 } else
6065 bp->current_interval = BNX2_TIMER_INTERVAL;
6066
6067 if (check_link) {
6068 u32 val;
6069
6070 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6071 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6072 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6073
6074 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6075 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6076 bnx2_5706s_force_link_dn(bp, 1);
6077 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6078 } else
6079 bnx2_set_link(bp);
6080 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6081 bnx2_set_link(bp);
6082 }
6083 spin_unlock(&bp->phy_lock);
6084 }
6085
6086 static void
6087 bnx2_5708_serdes_timer(struct bnx2 *bp)
6088 {
6089 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6090 return;
6091
6092 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6093 bp->serdes_an_pending = 0;
6094 return;
6095 }
6096
6097 spin_lock(&bp->phy_lock);
6098 if (bp->serdes_an_pending)
6099 bp->serdes_an_pending--;
6100 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6101 u32 bmcr;
6102
6103 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6104 if (bmcr & BMCR_ANENABLE) {
6105 bnx2_enable_forced_2g5(bp);
6106 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6107 } else {
6108 bnx2_disable_forced_2g5(bp);
6109 bp->serdes_an_pending = 2;
6110 bp->current_interval = BNX2_TIMER_INTERVAL;
6111 }
6112
6113 } else
6114 bp->current_interval = BNX2_TIMER_INTERVAL;
6115
6116 spin_unlock(&bp->phy_lock);
6117 }
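/* 5708S recovery heuristic: while autoneg is enabled but no link
 * comes up, each timer pass flips between forcing 2.5G and restoring
 * autoneg, giving the partner a chance to match either mode.
 * serdes_an_pending simply skips a couple of ticks after a flip so
 * the PHY can settle before the next probe.
 */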
6118
6119 static void
6120 bnx2_timer(unsigned long data)
6121 {
6122 struct bnx2 *bp = (struct bnx2 *) data;
6123
6124 if (!netif_running(bp->dev))
6125 return;
6126
6127 if (atomic_read(&bp->intr_sem) != 0)
6128 goto bnx2_restart_timer;
6129
6130 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6131 BNX2_FLAG_USING_MSI)
6132 bnx2_chk_missed_msi(bp);
6133
6134 bnx2_send_heart_beat(bp);
6135
6136 bp->stats_blk->stat_FwRxDrop =
6137 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6138
6139 /* Work around occasional corrupted counters */
6140 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6141 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6142 BNX2_HC_COMMAND_STATS_NOW);
6143
6144 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6145 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6146 bnx2_5706_serdes_timer(bp);
6147 else
6148 bnx2_5708_serdes_timer(bp);
6149 }
6150
6151 bnx2_restart_timer:
6152 mod_timer(&bp->timer, jiffies + bp->current_interval);
6153 }
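/* The heartbeat is a self-rearming timer: every pass ends in
 *
 *	mod_timer(&bp->timer, jiffies + bp->current_interval);
 *
 * with current_interval stretched or shrunk by the serdes helpers
 * above.  It is seeded from bnx2_open() and torn down with
 * del_timer_sync() in bnx2_close() and the error paths.
 */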
6154
6155 static int
6156 bnx2_request_irq(struct bnx2 *bp)
6157 {
6158 unsigned long flags;
6159 struct bnx2_irq *irq;
6160 int rc = 0, i;
6161
6162 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6163 flags = 0;
6164 else
6165 flags = IRQF_SHARED;
6166
6167 for (i = 0; i < bp->irq_nvecs; i++) {
6168 irq = &bp->irq_tbl[i];
6169 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6170 &bp->bnx2_napi[i]);
6171 if (rc)
6172 break;
6173 irq->requested = 1;
6174 }
6175 return rc;
6176 }
6177
6178 static void
6179 __bnx2_free_irq(struct bnx2 *bp)
6180 {
6181 struct bnx2_irq *irq;
6182 int i;
6183
6184 for (i = 0; i < bp->irq_nvecs; i++) {
6185 irq = &bp->irq_tbl[i];
6186 if (irq->requested)
6187 free_irq(irq->vector, &bp->bnx2_napi[i]);
6188 irq->requested = 0;
6189 }
6190 }
6191
6192 static void
6193 bnx2_free_irq(struct bnx2 *bp)
6194 {
6195
6196 __bnx2_free_irq(bp);
6197 if (bp->flags & BNX2_FLAG_USING_MSI)
6198 pci_disable_msi(bp->pdev);
6199 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6200 pci_disable_msix(bp->pdev);
6201
6202 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6203 }
6204
6205 static void
6206 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6207 {
6208 int i, total_vecs, rc;
6209 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6210 struct net_device *dev = bp->dev;
6211 const int len = sizeof(bp->irq_tbl[0].name);
6212
6213 bnx2_setup_msix_tbl(bp);
6214 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6215 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6216 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6217
6218 /* Need to flush the previous three writes to ensure MSI-X
6219 * is set up properly */
6220 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6221
6222 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6223 msix_ent[i].entry = i;
6224 msix_ent[i].vector = 0;
6225 }
6226
6227 total_vecs = msix_vecs;
6228 #ifdef BCM_CNIC
6229 total_vecs++;
6230 #endif
6231 rc = -ENOSPC;
6232 while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6233 rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6234 if (rc <= 0)
6235 break;
6236 total_vecs = rc;
6238 }
6239
6240 if (rc != 0)
6241 return;
6242
6243 msix_vecs = total_vecs;
6244 #ifdef BCM_CNIC
6245 msix_vecs--;
6246 #endif
6247 bp->irq_nvecs = msix_vecs;
6248 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6249 for (i = 0; i < total_vecs; i++) {
6250 bp->irq_tbl[i].vector = msix_ent[i].vector;
6251 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6252 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6253 }
6254 }
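/* pci_enable_msix() return convention, which the loop above leans on:
 * 0 means success, a negative value is a hard error, and a positive
 * value is the number of vectors the platform can actually provide,
 * so the request is retried with that smaller count.  For example,
 * asking for 9 vectors on a host with only 4 free goes:
 *
 *	pci_enable_msix(pdev, msix_ent, 9)  ->  4
 *	pci_enable_msix(pdev, msix_ent, 4)  ->  0
 *
 * One extra vector is reserved for cnic when BCM_CNIC is built in,
 * which is why total_vecs and msix_vecs differ by one there.
 */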
6255
6256 static int
6257 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6258 {
6259 int cpus = netif_get_num_default_rss_queues();
6260 int msix_vecs;
6261
6262 if (!bp->num_req_rx_rings)
6263 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6264 else if (!bp->num_req_tx_rings)
6265 msix_vecs = max(cpus, bp->num_req_rx_rings);
6266 else
6267 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6268
6269 msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6270
6271 bp->irq_tbl[0].handler = bnx2_interrupt;
6272 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6273 bp->irq_nvecs = 1;
6274 bp->irq_tbl[0].vector = bp->pdev->irq;
6275
6276 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6277 bnx2_enable_msix(bp, msix_vecs);
6278
6279 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6280 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6281 if (pci_enable_msi(bp->pdev) == 0) {
6282 bp->flags |= BNX2_FLAG_USING_MSI;
6283 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6284 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6285 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6286 } else
6287 bp->irq_tbl[0].handler = bnx2_msi;
6288
6289 bp->irq_tbl[0].vector = bp->pdev->irq;
6290 }
6291 }
6292
6293 if (!bp->num_req_tx_rings)
6294 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6295 else
6296 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6297
6298 if (!bp->num_req_rx_rings)
6299 bp->num_rx_rings = bp->irq_nvecs;
6300 else
6301 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6302
6303 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6304
6305 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6306 }
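/* Ring-count derivation: the vector budget starts from
 * netif_get_num_default_rss_queues() or the user's ethtool -L
 * request, TX rings are then rounded down to a power of two
 * (presumably to keep the queue_mapping -> ring math cheap), and RX
 * rings track the vector count directly.  With 6 vectors granted and
 * no user pinning, this yields num_tx_rings = 4 and num_rx_rings = 6.
 */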
6307
6308 /* Called with rtnl_lock */
6309 static int
6310 bnx2_open(struct net_device *dev)
6311 {
6312 struct bnx2 *bp = netdev_priv(dev);
6313 int rc;
6314
6315 rc = bnx2_request_firmware(bp);
6316 if (rc < 0)
6317 goto out;
6318
6319 netif_carrier_off(dev);
6320
6321 bnx2_set_power_state(bp, PCI_D0);
6322 bnx2_disable_int(bp);
6323
6324 rc = bnx2_setup_int_mode(bp, disable_msi);
6325 if (rc)
6326 goto open_err;
6327 bnx2_init_napi(bp);
6328 bnx2_napi_enable(bp);
6329 rc = bnx2_alloc_mem(bp);
6330 if (rc)
6331 goto open_err;
6332
6333 rc = bnx2_request_irq(bp);
6334 if (rc)
6335 goto open_err;
6336
6337 rc = bnx2_init_nic(bp, 1);
6338 if (rc)
6339 goto open_err;
6340
6341 mod_timer(&bp->timer, jiffies + bp->current_interval);
6342
6343 atomic_set(&bp->intr_sem, 0);
6344
6345 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6346
6347 bnx2_enable_int(bp);
6348
6349 if (bp->flags & BNX2_FLAG_USING_MSI) {
6350 /* Test MSI to make sure it is working.
6351 * If the MSI test fails, go back to INTx mode.
6352 */
6353 if (bnx2_test_intr(bp) != 0) {
6354 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6355
6356 bnx2_disable_int(bp);
6357 bnx2_free_irq(bp);
6358
6359 bnx2_setup_int_mode(bp, 1);
6360
6361 rc = bnx2_init_nic(bp, 0);
6362
6363 if (!rc)
6364 rc = bnx2_request_irq(bp);
6365
6366 if (rc) {
6367 del_timer_sync(&bp->timer);
6368 goto open_err;
6369 }
6370 bnx2_enable_int(bp);
6371 }
6372 }
6373 if (bp->flags & BNX2_FLAG_USING_MSI)
6374 netdev_info(dev, "using MSI\n");
6375 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6376 netdev_info(dev, "using MSIX\n");
6377
6378 netif_tx_start_all_queues(dev);
6379 out:
6380 return rc;
6381
6382 open_err:
6383 bnx2_napi_disable(bp);
6384 bnx2_free_skbs(bp);
6385 bnx2_free_irq(bp);
6386 bnx2_free_mem(bp);
6387 bnx2_del_napi(bp);
6388 bnx2_release_firmware(bp);
6389 goto out;
6390 }
6391
6392 static void
6393 bnx2_reset_task(struct work_struct *work)
6394 {
6395 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6396 int rc;
6397 u16 pcicmd;
6398
6399 rtnl_lock();
6400 if (!netif_running(bp->dev)) {
6401 rtnl_unlock();
6402 return;
6403 }
6404
6405 bnx2_netif_stop(bp, true);
6406
6407 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6408 if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6409 /* in case the PCI block has been reset */
6410 pci_restore_state(bp->pdev);
6411 pci_save_state(bp->pdev);
6412 }
6413 rc = bnx2_init_nic(bp, 1);
6414 if (rc) {
6415 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6416 bnx2_napi_enable(bp);
6417 dev_close(bp->dev);
6418 rtnl_unlock();
6419 return;
6420 }
6421
6422 atomic_set(&bp->intr_sem, 1);
6423 bnx2_netif_start(bp, true);
6424 rtnl_unlock();
6425 }
6426
6427 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6428
6429 static void
6430 bnx2_dump_ftq(struct bnx2 *bp)
6431 {
6432 int i;
6433 u32 reg, bdidx, cid, valid;
6434 struct net_device *dev = bp->dev;
6435 static const struct ftq_reg {
6436 char *name;
6437 u32 off;
6438 } ftq_arr[] = {
6439 BNX2_FTQ_ENTRY(RV2P_P),
6440 BNX2_FTQ_ENTRY(RV2P_T),
6441 BNX2_FTQ_ENTRY(RV2P_M),
6442 BNX2_FTQ_ENTRY(TBDR_),
6443 BNX2_FTQ_ENTRY(TDMA_),
6444 BNX2_FTQ_ENTRY(TXP_),
6445 BNX2_FTQ_ENTRY(TXP_),
6446 BNX2_FTQ_ENTRY(TPAT_),
6447 BNX2_FTQ_ENTRY(RXP_C),
6448 BNX2_FTQ_ENTRY(RXP_),
6449 BNX2_FTQ_ENTRY(COM_COMXQ_),
6450 BNX2_FTQ_ENTRY(COM_COMTQ_),
6451 BNX2_FTQ_ENTRY(COM_COMQ_),
6452 BNX2_FTQ_ENTRY(CP_CPQ_),
6453 };
6454
6455 netdev_err(dev, "<--- start FTQ dump --->\n");
6456 for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6457 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6458 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6459
6460 netdev_err(dev, "CPU states:\n");
6461 for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6462 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6463 reg, bnx2_reg_rd_ind(bp, reg),
6464 bnx2_reg_rd_ind(bp, reg + 4),
6465 bnx2_reg_rd_ind(bp, reg + 8),
6466 bnx2_reg_rd_ind(bp, reg + 0x1c),
6467 bnx2_reg_rd_ind(bp, reg + 0x1c),
6468 bnx2_reg_rd_ind(bp, reg + 0x20));
6469
6470 netdev_err(dev, "<--- end FTQ dump --->\n");
6471 netdev_err(dev, "<--- start TBDC dump --->\n");
6472 netdev_err(dev, "TBDC free cnt: %ld\n",
6473 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6474 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6475 for (i = 0; i < 0x20; i++) {
6476 int j = 0;
6477
6478 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6479 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6480 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6481 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6482 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6483 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6484 j++;
6485
6486 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6487 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6488 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6489 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6490 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6491 bdidx >> 24, (valid >> 8) & 0x0ff);
6492 }
6493 netdev_err(dev, "<--- end TBDC dump --->\n");
6494 }
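/* Notes on the dump above: each TBDC CAM row is fetched with a small
 * register handshake (point BNX2_TBDC_BD_ADDR at the row, issue the
 * CAM_READ opcode, kick CMD_REG_ARB and poll, bounded at 100 reads,
 * until the chip drops the arbitration bit), after which the
 * CID/BIDX/opcode registers hold the row that gets printed.  In the
 * CPU-state lines, pc is printed from two separate reads, so a value
 * that differs between them shows the CPU is still executing.
 */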
6495
6496 static void
6497 bnx2_dump_state(struct bnx2 *bp)
6498 {
6499 struct net_device *dev = bp->dev;
6500 u32 val1, val2;
6501
6502 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6503 netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6504 atomic_read(&bp->intr_sem), val1);
6505 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6506 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6507 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6508 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6509 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6510 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6511 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6512 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6513 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6514 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6515 if (bp->flags & BNX2_FLAG_USING_MSIX)
6516 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6517 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6518 }
6519
6520 static void
6521 bnx2_tx_timeout(struct net_device *dev)
6522 {
6523 struct bnx2 *bp = netdev_priv(dev);
6524
6525 bnx2_dump_ftq(bp);
6526 bnx2_dump_state(bp);
6527 bnx2_dump_mcp_state(bp);
6528
6529 /* This allows the netif to be shut down gracefully before resetting */
6530 schedule_work(&bp->reset_task);
6531 }
6532
6533 /* Called with netif_tx_lock.
6534 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6535 * netif_wake_queue().
6536 */
6537 static netdev_tx_t
6538 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6539 {
6540 struct bnx2 *bp = netdev_priv(dev);
6541 dma_addr_t mapping;
6542 struct bnx2_tx_bd *txbd;
6543 struct bnx2_sw_tx_bd *tx_buf;
6544 u32 len, vlan_tag_flags, last_frag, mss;
6545 u16 prod, ring_prod;
6546 int i;
6547 struct bnx2_napi *bnapi;
6548 struct bnx2_tx_ring_info *txr;
6549 struct netdev_queue *txq;
6550
6551 /* Determine which tx ring this skb will be placed on */
6552 i = skb_get_queue_mapping(skb);
6553 bnapi = &bp->bnx2_napi[i];
6554 txr = &bnapi->tx_ring;
6555 txq = netdev_get_tx_queue(dev, i);
6556
6557 if (unlikely(bnx2_tx_avail(bp, txr) <
6558 (skb_shinfo(skb)->nr_frags + 1))) {
6559 netif_tx_stop_queue(txq);
6560 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6561
6562 return NETDEV_TX_BUSY;
6563 }
6564 len = skb_headlen(skb);
6565 prod = txr->tx_prod;
6566 ring_prod = BNX2_TX_RING_IDX(prod);
6567
6568 vlan_tag_flags = 0;
6569 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6570 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6571 }
6572
6573 if (vlan_tx_tag_present(skb)) {
6574 vlan_tag_flags |=
6575 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6576 }
6577
6578 if ((mss = skb_shinfo(skb)->gso_size)) {
6579 u32 tcp_opt_len;
6580 struct iphdr *iph;
6581
6582 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6583
6584 tcp_opt_len = tcp_optlen(skb);
6585
6586 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6587 u32 tcp_off = skb_transport_offset(skb) -
6588 sizeof(struct ipv6hdr) - ETH_HLEN;
6589
6590 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6591 TX_BD_FLAGS_SW_FLAGS;
6592 if (likely(tcp_off == 0))
6593 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6594 else {
6595 tcp_off >>= 3;
6596 vlan_tag_flags |= ((tcp_off & 0x3) <<
6597 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6598 ((tcp_off & 0x10) <<
6599 TX_BD_FLAGS_TCP6_OFF4_SHL);
6600 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6601 }
6602 } else {
6603 iph = ip_hdr(skb);
6604 if (tcp_opt_len || (iph->ihl > 5)) {
6605 vlan_tag_flags |= ((iph->ihl - 5) +
6606 (tcp_opt_len >> 2)) << 8;
6607 }
6608 }
6609 } else
6610 mss = 0;
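/* IPv6 LSO offset packing, for the branch above: the chip assumes the
 * TCP header directly follows a bare ipv6hdr, so any extension-header
 * length (tcp_off, in 8-byte units) must be smuggled into spare BD
 * bits.  For a hypothetical single 8-byte extension header, tcp_off
 * is 1 after the >>= 3 and is scattered as:
 *
 *	bits 1:0 -> vlan_tag_flags via TX_BD_FLAGS_TCP6_OFF0_SHL  (= 1)
 *	bit  4   -> vlan_tag_flags via TX_BD_FLAGS_TCP6_OFF4_SHL  (= 0)
 *	bits 3:2 -> mss field via TX_BD_TCP6_OFF2_SHL             (= 0)
 */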
6611
6612 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6613 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6614 dev_kfree_skb(skb);
6615 return NETDEV_TX_OK;
6616 }
6617
6618 tx_buf = &txr->tx_buf_ring[ring_prod];
6619 tx_buf->skb = skb;
6620 dma_unmap_addr_set(tx_buf, mapping, mapping);
6621
6622 txbd = &txr->tx_desc_ring[ring_prod];
6623
6624 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6625 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6626 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6627 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6628
6629 last_frag = skb_shinfo(skb)->nr_frags;
6630 tx_buf->nr_frags = last_frag;
6631 tx_buf->is_gso = skb_is_gso(skb);
6632
6633 for (i = 0; i < last_frag; i++) {
6634 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6635
6636 prod = BNX2_NEXT_TX_BD(prod);
6637 ring_prod = BNX2_TX_RING_IDX(prod);
6638 txbd = &txr->tx_desc_ring[ring_prod];
6639
6640 len = skb_frag_size(frag);
6641 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6642 DMA_TO_DEVICE);
6643 if (dma_mapping_error(&bp->pdev->dev, mapping))
6644 goto dma_error;
6645 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6646 mapping);
6647
6648 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6649 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6650 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6651 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6652
6653 }
6654 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6655
6656 /* Sync BD data before updating TX mailbox */
6657 wmb();
6658
6659 netdev_tx_sent_queue(txq, skb->len);
6660
6661 prod = BNX2_NEXT_TX_BD(prod);
6662 txr->tx_prod_bseq += skb->len;
6663
6664 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6665 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6666
6667 mmiowb();
6668
6669 txr->tx_prod = prod;
6670
6671 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6672 netif_tx_stop_queue(txq);
6673
6674 /* netif_tx_stop_queue() must be done before checking
6675 * tx index in bnx2_tx_avail() below, because in
6676 * bnx2_tx_int(), we update tx index before checking for
6677 * netif_tx_queue_stopped().
6678 */
6679 smp_mb();
6680 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6681 netif_tx_wake_queue(txq);
6682 }
6683
6684 return NETDEV_TX_OK;
6685 dma_error:
6686 /* save index of the frag that failed */
6687 last_frag = i;
6688
6689 /* start back at the beginning and unmap the skb head */
6690 prod = txr->tx_prod;
6691 ring_prod = BNX2_TX_RING_IDX(prod);
6692 tx_buf = &txr->tx_buf_ring[ring_prod];
6693 tx_buf->skb = NULL;
6694 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6695 skb_headlen(skb), PCI_DMA_TODEVICE);
6696
6697 /* unmap remaining mapped pages */
6698 for (i = 0; i < last_frag; i++) {
6699 prod = BNX2_NEXT_TX_BD(prod);
6700 ring_prod = BNX2_TX_RING_IDX(prod);
6701 tx_buf = &txr->tx_buf_ring[ring_prod];
6702 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6703 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6704 PCI_DMA_TODEVICE);
6705 }
6706
6707 dev_kfree_skb(skb);
6708 return NETDEV_TX_OK;
6709 }
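/* TX hand-off summary: one BD per fragment, TX_BD_FLAGS_START only on
 * the first and TX_BD_FLAGS_END only on the last, and the chip learns
 * about the whole chain at once through the two mailbox writes:
 *
 *	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
 *	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
 *
 * The wmb() beforehand ensures the descriptors are visible to the
 * device before it observes the new producer index.
 */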
6710
6711 /* Called with rtnl_lock */
6712 static int
6713 bnx2_close(struct net_device *dev)
6714 {
6715 struct bnx2 *bp = netdev_priv(dev);
6716
6717 bnx2_disable_int_sync(bp);
6718 bnx2_napi_disable(bp);
6719 netif_tx_disable(dev);
6720 del_timer_sync(&bp->timer);
6721 bnx2_shutdown_chip(bp);
6722 bnx2_free_irq(bp);
6723 bnx2_free_skbs(bp);
6724 bnx2_free_mem(bp);
6725 bnx2_del_napi(bp);
6726 bp->link_up = 0;
6727 netif_carrier_off(bp->dev);
6728 bnx2_set_power_state(bp, PCI_D3hot);
6729 return 0;
6730 }
6731
6732 static void
6733 bnx2_save_stats(struct bnx2 *bp)
6734 {
6735 u32 *hw_stats = (u32 *) bp->stats_blk;
6736 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6737 int i;
6738
6739 /* The first 10 counters are 64-bit counters (hi/lo word pairs) */
6740 for (i = 0; i < 20; i += 2) {
6741 u32 hi;
6742 u64 lo;
6743
6744 hi = temp_stats[i] + hw_stats[i];
6745 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6746 if (lo > 0xffffffff)
6747 hi++;
6748 temp_stats[i] = hi;
6749 temp_stats[i + 1] = lo & 0xffffffff;
6750 }
6751
6752 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6753 temp_stats[i] += hw_stats[i];
6754 }
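/* Worked example of the manual carry above, for one hi/lo pair: with
 * temp_stats = {1, 3} and hw_stats = {0, 0xfffffffe}, lo becomes
 * 0x100000001, which overflows 32 bits, so hi is bumped to 2 and lo
 * is truncated to 1 -- i.e. 0x100000003 + 0xfffffffe = 0x200000001.
 * Plain per-word 32-bit addition would silently drop that carry.
 */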
6755
6756 #define GET_64BIT_NET_STATS64(ctr) \
6757 (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6758
6759 #define GET_64BIT_NET_STATS(ctr) \
6760 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6761 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6762
6763 #define GET_32BIT_NET_STATS(ctr) \
6764 (unsigned long) (bp->stats_blk->ctr + \
6765 bp->temp_stats_blk->ctr)
6766
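/* These helpers fold the live hardware stats block together with
 * temp_stats_blk, the running totals banked by bnx2_save_stats()
 * across chip resets.  Expanded, a 64-bit counter read is roughly
 *
 *	(((u64) stats_blk->ctr_hi << 32) + stats_blk->ctr_lo) +
 *	(((u64) temp_stats_blk->ctr_hi << 32) + temp_stats_blk->ctr_lo)
 *
 * so counters survive ring-size changes and self-tests that reset
 * the chip.
 */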
6767 static struct rtnl_link_stats64 *
6768 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6769 {
6770 struct bnx2 *bp = netdev_priv(dev);
6771
6772 if (bp->stats_blk == NULL)
6773 return net_stats;
6774
6775 net_stats->rx_packets =
6776 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6777 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6778 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6779
6780 net_stats->tx_packets =
6781 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6782 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6783 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6784
6785 net_stats->rx_bytes =
6786 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6787
6788 net_stats->tx_bytes =
6789 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6790
6791 net_stats->multicast =
6792 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6793
6794 net_stats->collisions =
6795 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6796
6797 net_stats->rx_length_errors =
6798 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6799 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6800
6801 net_stats->rx_over_errors =
6802 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6803 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6804
6805 net_stats->rx_frame_errors =
6806 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6807
6808 net_stats->rx_crc_errors =
6809 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6810
6811 net_stats->rx_errors = net_stats->rx_length_errors +
6812 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6813 net_stats->rx_crc_errors;
6814
6815 net_stats->tx_aborted_errors =
6816 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6817 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6818
6819 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6820 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6821 net_stats->tx_carrier_errors = 0;
6822 else {
6823 net_stats->tx_carrier_errors =
6824 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6825 }
6826
6827 net_stats->tx_errors =
6828 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6829 net_stats->tx_aborted_errors +
6830 net_stats->tx_carrier_errors;
6831
6832 net_stats->rx_missed_errors =
6833 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6834 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6835 GET_32BIT_NET_STATS(stat_FwRxDrop);
6836
6837 return net_stats;
6838 }
6839
6840 /* All ethtool functions called with rtnl_lock */
6841
6842 static int
6843 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6844 {
6845 struct bnx2 *bp = netdev_priv(dev);
6846 int support_serdes = 0, support_copper = 0;
6847
6848 cmd->supported = SUPPORTED_Autoneg;
6849 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6850 support_serdes = 1;
6851 support_copper = 1;
6852 } else if (bp->phy_port == PORT_FIBRE)
6853 support_serdes = 1;
6854 else
6855 support_copper = 1;
6856
6857 if (support_serdes) {
6858 cmd->supported |= SUPPORTED_1000baseT_Full |
6859 SUPPORTED_FIBRE;
6860 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6861 cmd->supported |= SUPPORTED_2500baseX_Full;
6862
6863 }
6864 if (support_copper) {
6865 cmd->supported |= SUPPORTED_10baseT_Half |
6866 SUPPORTED_10baseT_Full |
6867 SUPPORTED_100baseT_Half |
6868 SUPPORTED_100baseT_Full |
6869 SUPPORTED_1000baseT_Full |
6870 SUPPORTED_TP;
6871
6872 }
6873
6874 spin_lock_bh(&bp->phy_lock);
6875 cmd->port = bp->phy_port;
6876 cmd->advertising = bp->advertising;
6877
6878 if (bp->autoneg & AUTONEG_SPEED) {
6879 cmd->autoneg = AUTONEG_ENABLE;
6880 } else {
6881 cmd->autoneg = AUTONEG_DISABLE;
6882 }
6883
6884 if (netif_carrier_ok(dev)) {
6885 ethtool_cmd_speed_set(cmd, bp->line_speed);
6886 cmd->duplex = bp->duplex;
6887 }
6888 else {
6889 ethtool_cmd_speed_set(cmd, -1);
6890 cmd->duplex = -1;
6891 }
6892 spin_unlock_bh(&bp->phy_lock);
6893
6894 cmd->transceiver = XCVR_INTERNAL;
6895 cmd->phy_address = bp->phy_addr;
6896
6897 return 0;
6898 }
6899
6900 static int
6901 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6902 {
6903 struct bnx2 *bp = netdev_priv(dev);
6904 u8 autoneg = bp->autoneg;
6905 u8 req_duplex = bp->req_duplex;
6906 u16 req_line_speed = bp->req_line_speed;
6907 u32 advertising = bp->advertising;
6908 int err = -EINVAL;
6909
6910 spin_lock_bh(&bp->phy_lock);
6911
6912 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6913 goto err_out_unlock;
6914
6915 if (cmd->port != bp->phy_port &&
6916 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6917 goto err_out_unlock;
6918
6919 /* If device is down, we can store the settings only if the user
6920 * is setting the currently active port.
6921 */
6922 if (!netif_running(dev) && cmd->port != bp->phy_port)
6923 goto err_out_unlock;
6924
6925 if (cmd->autoneg == AUTONEG_ENABLE) {
6926 autoneg |= AUTONEG_SPEED;
6927
6928 advertising = cmd->advertising;
6929 if (cmd->port == PORT_TP) {
6930 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6931 if (!advertising)
6932 advertising = ETHTOOL_ALL_COPPER_SPEED;
6933 } else {
6934 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6935 if (!advertising)
6936 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6937 }
6938 advertising |= ADVERTISED_Autoneg;
6939 }
6940 else {
6941 u32 speed = ethtool_cmd_speed(cmd);
6942 if (cmd->port == PORT_FIBRE) {
6943 if ((speed != SPEED_1000 &&
6944 speed != SPEED_2500) ||
6945 (cmd->duplex != DUPLEX_FULL))
6946 goto err_out_unlock;
6947
6948 if (speed == SPEED_2500 &&
6949 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6950 goto err_out_unlock;
6951 } else if (speed == SPEED_1000 || speed == SPEED_2500)
6952 goto err_out_unlock;
6953
6954 autoneg &= ~AUTONEG_SPEED;
6955 req_line_speed = speed;
6956 req_duplex = cmd->duplex;
6957 advertising = 0;
6958 }
6959
6960 bp->autoneg = autoneg;
6961 bp->advertising = advertising;
6962 bp->req_line_speed = req_line_speed;
6963 bp->req_duplex = req_duplex;
6964
6965 err = 0;
6966 /* If device is down, the new settings will be picked up when it is
6967 * brought up.
6968 */
6969 if (netif_running(dev))
6970 err = bnx2_setup_phy(bp, cmd->port);
6971
6972 err_out_unlock:
6973 spin_unlock_bh(&bp->phy_lock);
6974
6975 return err;
6976 }
6977
6978 static void
6979 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6980 {
6981 struct bnx2 *bp = netdev_priv(dev);
6982
6983 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6984 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6985 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6986 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6987 }
6988
6989 #define BNX2_REGDUMP_LEN (32 * 1024)
6990
6991 static int
6992 bnx2_get_regs_len(struct net_device *dev)
6993 {
6994 return BNX2_REGDUMP_LEN;
6995 }
6996
6997 static void
6998 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6999 {
7000 u32 *p = _p, i, offset;
7001 u8 *orig_p = _p;
7002 struct bnx2 *bp = netdev_priv(dev);
7003 static const u32 reg_boundaries[] = {
7004 0x0000, 0x0098, 0x0400, 0x045c,
7005 0x0800, 0x0880, 0x0c00, 0x0c10,
7006 0x0c30, 0x0d08, 0x1000, 0x101c,
7007 0x1040, 0x1048, 0x1080, 0x10a4,
7008 0x1400, 0x1490, 0x1498, 0x14f0,
7009 0x1500, 0x155c, 0x1580, 0x15dc,
7010 0x1600, 0x1658, 0x1680, 0x16d8,
7011 0x1800, 0x1820, 0x1840, 0x1854,
7012 0x1880, 0x1894, 0x1900, 0x1984,
7013 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7014 0x1c80, 0x1c94, 0x1d00, 0x1d84,
7015 0x2000, 0x2030, 0x23c0, 0x2400,
7016 0x2800, 0x2820, 0x2830, 0x2850,
7017 0x2b40, 0x2c10, 0x2fc0, 0x3058,
7018 0x3c00, 0x3c94, 0x4000, 0x4010,
7019 0x4080, 0x4090, 0x43c0, 0x4458,
7020 0x4c00, 0x4c18, 0x4c40, 0x4c54,
7021 0x4fc0, 0x5010, 0x53c0, 0x5444,
7022 0x5c00, 0x5c18, 0x5c80, 0x5c90,
7023 0x5fc0, 0x6000, 0x6400, 0x6428,
7024 0x6800, 0x6848, 0x684c, 0x6860,
7025 0x6888, 0x6910, 0x8000
7026 };
7027
7028 regs->version = 0;
7029
7030 memset(p, 0, BNX2_REGDUMP_LEN);
7031
7032 if (!netif_running(bp->dev))
7033 return;
7034
7035 i = 0;
7036 offset = reg_boundaries[0];
7037 p += offset;
7038 while (offset < BNX2_REGDUMP_LEN) {
7039 *p++ = BNX2_RD(bp, offset);
7040 offset += 4;
7041 if (offset == reg_boundaries[i + 1]) {
7042 offset = reg_boundaries[i + 2];
7043 p = (u32 *) (orig_p + offset);
7044 i += 2;
7045 }
7046 }
7047 }
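/* reg_boundaries[] is a flat list of (start, end) offset pairs naming
 * the readable register windows; anything between one window's end
 * and the next start stays zeroed in the dump, the gaps covering
 * offsets that should not be read.  The first pair, for example,
 * dumps 0x0000-0x0094 inclusive and then the walk jumps straight to
 * 0x0400.
 */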
7048
7049 static void
7050 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7051 {
7052 struct bnx2 *bp = netdev_priv(dev);
7053
7054 if (bp->flags & BNX2_FLAG_NO_WOL) {
7055 wol->supported = 0;
7056 wol->wolopts = 0;
7057 }
7058 else {
7059 wol->supported = WAKE_MAGIC;
7060 if (bp->wol)
7061 wol->wolopts = WAKE_MAGIC;
7062 else
7063 wol->wolopts = 0;
7064 }
7065 memset(&wol->sopass, 0, sizeof(wol->sopass));
7066 }
7067
7068 static int
7069 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7070 {
7071 struct bnx2 *bp = netdev_priv(dev);
7072
7073 if (wol->wolopts & ~WAKE_MAGIC)
7074 return -EINVAL;
7075
7076 if (wol->wolopts & WAKE_MAGIC) {
7077 if (bp->flags & BNX2_FLAG_NO_WOL)
7078 return -EINVAL;
7079
7080 bp->wol = 1;
7081 }
7082 else {
7083 bp->wol = 0;
7084 }
7085 return 0;
7086 }
7087
7088 static int
7089 bnx2_nway_reset(struct net_device *dev)
7090 {
7091 struct bnx2 *bp = netdev_priv(dev);
7092 u32 bmcr;
7093
7094 if (!netif_running(dev))
7095 return -EAGAIN;
7096
7097 if (!(bp->autoneg & AUTONEG_SPEED)) {
7098 return -EINVAL;
7099 }
7100
7101 spin_lock_bh(&bp->phy_lock);
7102
7103 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7104 int rc;
7105
7106 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7107 spin_unlock_bh(&bp->phy_lock);
7108 return rc;
7109 }
7110
7111 /* Force a link down visible on the other side */
7112 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7113 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7114 spin_unlock_bh(&bp->phy_lock);
7115
7116 msleep(20);
7117
7118 spin_lock_bh(&bp->phy_lock);
7119
7120 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7121 bp->serdes_an_pending = 1;
7122 mod_timer(&bp->timer, jiffies + bp->current_interval);
7123 }
7124
7125 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7126 bmcr &= ~BMCR_LOOPBACK;
7127 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7128
7129 spin_unlock_bh(&bp->phy_lock);
7130
7131 return 0;
7132 }
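/* Serdes restart subtlety, for the function above: simply restarting
 * autoneg would not drop the link in a way a fiber partner notices,
 * so BMCR_LOOPBACK is written first to force the link down visibly,
 * the 20 ms sleep lets the partner react, and only then is loopback
 * cleared and BMCR_ANRESTART | BMCR_ANENABLE issued.
 */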
7133
7134 static u32
7135 bnx2_get_link(struct net_device *dev)
7136 {
7137 struct bnx2 *bp = netdev_priv(dev);
7138
7139 return bp->link_up;
7140 }
7141
7142 static int
7143 bnx2_get_eeprom_len(struct net_device *dev)
7144 {
7145 struct bnx2 *bp = netdev_priv(dev);
7146
7147 if (bp->flash_info == NULL)
7148 return 0;
7149
7150 return (int) bp->flash_size;
7151 }
7152
7153 static int
7154 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7155 u8 *eebuf)
7156 {
7157 struct bnx2 *bp = netdev_priv(dev);
7158 int rc;
7159
7160 if (!netif_running(dev))
7161 return -EAGAIN;
7162
7163 /* parameters already validated in ethtool_get_eeprom */
7164
7165 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7166
7167 return rc;
7168 }
7169
7170 static int
7171 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7172 u8 *eebuf)
7173 {
7174 struct bnx2 *bp = netdev_priv(dev);
7175 int rc;
7176
7177 if (!netif_running(dev))
7178 return -EAGAIN;
7179
7180 /* parameters already validated in ethtool_set_eeprom */
7181
7182 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7183
7184 return rc;
7185 }
7186
7187 static int
7188 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7189 {
7190 struct bnx2 *bp = netdev_priv(dev);
7191
7192 memset(coal, 0, sizeof(struct ethtool_coalesce));
7193
7194 coal->rx_coalesce_usecs = bp->rx_ticks;
7195 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7196 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7197 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7198
7199 coal->tx_coalesce_usecs = bp->tx_ticks;
7200 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7201 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7202 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7203
7204 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7205
7206 return 0;
7207 }
7208
7209 static int
7210 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7211 {
7212 struct bnx2 *bp = netdev_priv(dev);
7213
7214 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7215 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7216
7217 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7218 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7219
7220 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7221 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7222
7223 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7224 if (bp->rx_quick_cons_trip_int > 0xff)
7225 bp->rx_quick_cons_trip_int = 0xff;
7226
7227 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7228 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7229
7230 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7231 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7232
7233 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7234 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7235
7236 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7237 if (bp->tx_quick_cons_trip_int > 0xff)
7238 bp->tx_quick_cons_trip_int = 0xff;
7239
7240 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7241 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7242 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7243 bp->stats_ticks = USEC_PER_SEC;
7244 }
7245 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7246 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7247 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7248
7249 if (netif_running(bp->dev)) {
7250 bnx2_netif_stop(bp, true);
7251 bnx2_init_nic(bp, 0);
7252 bnx2_netif_start(bp, true);
7253 }
7254
7255 return 0;
7256 }
7257
7258 static void
7259 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7260 {
7261 struct bnx2 *bp = netdev_priv(dev);
7262
7263 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7264 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7265
7266 ering->rx_pending = bp->rx_ring_size;
7267 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7268
7269 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7270 ering->tx_pending = bp->tx_ring_size;
7271 }
7272
7273 static int
7274 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7275 {
7276 if (netif_running(bp->dev)) {
7277 /* Reset will erase chipset stats; save them */
7278 bnx2_save_stats(bp);
7279
7280 bnx2_netif_stop(bp, true);
7281 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7282 if (reset_irq) {
7283 bnx2_free_irq(bp);
7284 bnx2_del_napi(bp);
7285 } else {
7286 __bnx2_free_irq(bp);
7287 }
7288 bnx2_free_skbs(bp);
7289 bnx2_free_mem(bp);
7290 }
7291
7292 bnx2_set_rx_ring_size(bp, rx);
7293 bp->tx_ring_size = tx;
7294
7295 if (netif_running(bp->dev)) {
7296 int rc = 0;
7297
7298 if (reset_irq) {
7299 rc = bnx2_setup_int_mode(bp, disable_msi);
7300 bnx2_init_napi(bp);
7301 }
7302
7303 if (!rc)
7304 rc = bnx2_alloc_mem(bp);
7305
7306 if (!rc)
7307 rc = bnx2_request_irq(bp);
7308
7309 if (!rc)
7310 rc = bnx2_init_nic(bp, 0);
7311
7312 if (rc) {
7313 bnx2_napi_enable(bp);
7314 dev_close(bp->dev);
7315 return rc;
7316 }
7317 #ifdef BCM_CNIC
7318 mutex_lock(&bp->cnic_lock);
7319 /* Let cnic know about the new status block. */
7320 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7321 bnx2_setup_cnic_irq_info(bp);
7322 mutex_unlock(&bp->cnic_lock);
7323 #endif
7324 bnx2_netif_start(bp, true);
7325 }
7326 return 0;
7327 }
7328
7329 static int
7330 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7331 {
7332 struct bnx2 *bp = netdev_priv(dev);
7333 int rc;
7334
7335 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7336 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7337 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7338
7339 return -EINVAL;
7340 }
7341 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7342 false);
7343 return rc;
7344 }
7345
7346 static void
7347 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7348 {
7349 struct bnx2 *bp = netdev_priv(dev);
7350
7351 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7352 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7353 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7354 }
7355
7356 static int
7357 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7358 {
7359 struct bnx2 *bp = netdev_priv(dev);
7360
7361 bp->req_flow_ctrl = 0;
7362 if (epause->rx_pause)
7363 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7364 if (epause->tx_pause)
7365 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7366
7367 if (epause->autoneg) {
7368 bp->autoneg |= AUTONEG_FLOW_CTRL;
7369 }
7370 else {
7371 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7372 }
7373
7374 if (netif_running(dev)) {
7375 spin_lock_bh(&bp->phy_lock);
7376 bnx2_setup_phy(bp, bp->phy_port);
7377 spin_unlock_bh(&bp->phy_lock);
7378 }
7379
7380 return 0;
7381 }
7382
7383 static struct {
7384 char string[ETH_GSTRING_LEN];
7385 } bnx2_stats_str_arr[] = {
7386 { "rx_bytes" },
7387 { "rx_error_bytes" },
7388 { "tx_bytes" },
7389 { "tx_error_bytes" },
7390 { "rx_ucast_packets" },
7391 { "rx_mcast_packets" },
7392 { "rx_bcast_packets" },
7393 { "tx_ucast_packets" },
7394 { "tx_mcast_packets" },
7395 { "tx_bcast_packets" },
7396 { "tx_mac_errors" },
7397 { "tx_carrier_errors" },
7398 { "rx_crc_errors" },
7399 { "rx_align_errors" },
7400 { "tx_single_collisions" },
7401 { "tx_multi_collisions" },
7402 { "tx_deferred" },
7403 { "tx_excess_collisions" },
7404 { "tx_late_collisions" },
7405 { "tx_total_collisions" },
7406 { "rx_fragments" },
7407 { "rx_jabbers" },
7408 { "rx_undersize_packets" },
7409 { "rx_oversize_packets" },
7410 { "rx_64_byte_packets" },
7411 { "rx_65_to_127_byte_packets" },
7412 { "rx_128_to_255_byte_packets" },
7413 { "rx_256_to_511_byte_packets" },
7414 { "rx_512_to_1023_byte_packets" },
7415 { "rx_1024_to_1522_byte_packets" },
7416 { "rx_1523_to_9022_byte_packets" },
7417 { "tx_64_byte_packets" },
7418 { "tx_65_to_127_byte_packets" },
7419 { "tx_128_to_255_byte_packets" },
7420 { "tx_256_to_511_byte_packets" },
7421 { "tx_512_to_1023_byte_packets" },
7422 { "tx_1024_to_1522_byte_packets" },
7423 { "tx_1523_to_9022_byte_packets" },
7424 { "rx_xon_frames" },
7425 { "rx_xoff_frames" },
7426 { "tx_xon_frames" },
7427 { "tx_xoff_frames" },
7428 { "rx_mac_ctrl_frames" },
7429 { "rx_filtered_packets" },
7430 { "rx_ftq_discards" },
7431 { "rx_discards" },
7432 { "rx_fw_discards" },
7433 };
7434
7435 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7436
7437 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7438
7439 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7440 STATS_OFFSET32(stat_IfHCInOctets_hi),
7441 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7442 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7443 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7444 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7445 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7446 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7447 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7448 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7449 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7450 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7451 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7452 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7453 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7454 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7455 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7456 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7457 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7458 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7459 STATS_OFFSET32(stat_EtherStatsCollisions),
7460 STATS_OFFSET32(stat_EtherStatsFragments),
7461 STATS_OFFSET32(stat_EtherStatsJabbers),
7462 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7463 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7464 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7465 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7466 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7467 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7468 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7469 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7470 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7471 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7472 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7473 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7474 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7475 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7476 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7477 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7478 STATS_OFFSET32(stat_XonPauseFramesReceived),
7479 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7480 STATS_OFFSET32(stat_OutXonSent),
7481 STATS_OFFSET32(stat_OutXoffSent),
7482 STATS_OFFSET32(stat_MacControlFramesReceived),
7483 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7484 STATS_OFFSET32(stat_IfInFTQDiscards),
7485 STATS_OFFSET32(stat_IfInMBUFDiscards),
7486 STATS_OFFSET32(stat_FwRxDrop),
7487 };
7488
7489 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7490 * skipped because of errata.
7491 */
7492 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7493 8,0,8,8,8,8,8,8,8,8,
7494 4,0,4,4,4,4,4,4,4,4,
7495 4,4,4,4,4,4,4,4,4,4,
7496 4,4,4,4,4,4,4,4,4,4,
7497 4,4,4,4,4,4,4,
7498 };
7499
7500 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7501 8,0,8,8,8,8,8,8,8,8,
7502 4,4,4,4,4,4,4,4,4,4,
7503 4,4,4,4,4,4,4,4,4,4,
7504 4,4,4,4,4,4,4,4,4,4,
7505 4,4,4,4,4,4,4,
7506 };
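/* Width codes for the two arrays above: 8 marks a 64-bit counter (a
 * hi/lo word pair), 4 a 32-bit counter, and 0 a counter skipped on
 * that chip per the errata note; bnx2_get_ethtool_stats() switches on
 * these widths when assembling its u64 output, e.g. an 8 entry reads
 * (hi << 32) + lo from both the live and the saved stats blocks.
 */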
7507
7508 #define BNX2_NUM_TESTS 6
7509
7510 static struct {
7511 char string[ETH_GSTRING_LEN];
7512 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7513 { "register_test (offline)" },
7514 { "memory_test (offline)" },
7515 { "loopback_test (offline)" },
7516 { "nvram_test (online)" },
7517 { "interrupt_test (online)" },
7518 { "link_test (online)" },
7519 };
7520
7521 static int
7522 bnx2_get_sset_count(struct net_device *dev, int sset)
7523 {
7524 switch (sset) {
7525 case ETH_SS_TEST:
7526 return BNX2_NUM_TESTS;
7527 case ETH_SS_STATS:
7528 return BNX2_NUM_STATS;
7529 default:
7530 return -EOPNOTSUPP;
7531 }
7532 }
7533
7534 static void
7535 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7536 {
7537 struct bnx2 *bp = netdev_priv(dev);
7538
7539 bnx2_set_power_state(bp, PCI_D0);
7540
7541 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7542 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7543 int i;
7544
7545 bnx2_netif_stop(bp, true);
7546 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7547 bnx2_free_skbs(bp);
7548
7549 if (bnx2_test_registers(bp) != 0) {
7550 buf[0] = 1;
7551 etest->flags |= ETH_TEST_FL_FAILED;
7552 }
7553 if (bnx2_test_memory(bp) != 0) {
7554 buf[1] = 1;
7555 etest->flags |= ETH_TEST_FL_FAILED;
7556 }
7557 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7558 etest->flags |= ETH_TEST_FL_FAILED;
7559
7560 if (!netif_running(bp->dev))
7561 bnx2_shutdown_chip(bp);
7562 else {
7563 bnx2_init_nic(bp, 1);
7564 bnx2_netif_start(bp, true);
7565 }
7566
7567 /* wait for link up */
7568 for (i = 0; i < 7; i++) {
7569 if (bp->link_up)
7570 break;
7571 msleep_interruptible(1000);
7572 }
7573 }
7574
7575 if (bnx2_test_nvram(bp) != 0) {
7576 buf[3] = 1;
7577 etest->flags |= ETH_TEST_FL_FAILED;
7578 }
7579 if (bnx2_test_intr(bp) != 0) {
7580 buf[4] = 1;
7581 etest->flags |= ETH_TEST_FL_FAILED;
7582 }
7583
7584 if (bnx2_test_link(bp) != 0) {
7585 buf[5] = 1;
7586 etest->flags |= ETH_TEST_FL_FAILED;
7587
7588 }
7589 if (!netif_running(bp->dev))
7590 bnx2_set_power_state(bp, PCI_D3hot);
7591 }
7592
7593 static void
7594 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7595 {
7596 switch (stringset) {
7597 case ETH_SS_STATS:
7598 memcpy(buf, bnx2_stats_str_arr,
7599 sizeof(bnx2_stats_str_arr));
7600 break;
7601 case ETH_SS_TEST:
7602 memcpy(buf, bnx2_tests_str_arr,
7603 sizeof(bnx2_tests_str_arr));
7604 break;
7605 }
7606 }
7607
7608 static void
7609 bnx2_get_ethtool_stats(struct net_device *dev,
7610 struct ethtool_stats *stats, u64 *buf)
7611 {
7612 struct bnx2 *bp = netdev_priv(dev);
7613 int i;
7614 u32 *hw_stats = (u32 *) bp->stats_blk;
7615 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7616 u8 *stats_len_arr = NULL;
7617
7618 if (hw_stats == NULL) {
7619 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7620 return;
7621 }
7622
7623 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7624 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7625 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7626 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7627 stats_len_arr = bnx2_5706_stats_len_arr;
7628 else
7629 stats_len_arr = bnx2_5708_stats_len_arr;
7630
7631 for (i = 0; i < BNX2_NUM_STATS; i++) {
7632 unsigned long offset;
7633
7634 if (stats_len_arr[i] == 0) {
7635 /* skip this counter */
7636 buf[i] = 0;
7637 continue;
7638 }
7639
7640 offset = bnx2_stats_offset_arr[i];
7641 if (stats_len_arr[i] == 4) {
7642 /* 4-byte counter */
7643 buf[i] = (u64) *(hw_stats + offset) +
7644 *(temp_stats + offset);
7645 continue;
7646 }
7647 /* 8-byte counter */
7648 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7649 *(hw_stats + offset + 1) +
7650 (((u64) *(temp_stats + offset)) << 32) +
7651 *(temp_stats + offset + 1);
7652 }
7653 }
7654
7655 static int
7656 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7657 {
7658 struct bnx2 *bp = netdev_priv(dev);
7659
7660 switch (state) {
7661 case ETHTOOL_ID_ACTIVE:
7662 bnx2_set_power_state(bp, PCI_D0);
7663
7664 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7665 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7666 return 1; /* cycle on/off once per second */
7667
7668 case ETHTOOL_ID_ON:
7669 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7670 BNX2_EMAC_LED_1000MB_OVERRIDE |
7671 BNX2_EMAC_LED_100MB_OVERRIDE |
7672 BNX2_EMAC_LED_10MB_OVERRIDE |
7673 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7674 BNX2_EMAC_LED_TRAFFIC);
7675 break;
7676
7677 case ETHTOOL_ID_OFF:
7678 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7679 break;
7680
7681 case ETHTOOL_ID_INACTIVE:
7682 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7683 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7684
7685 if (!netif_running(dev))
7686 bnx2_set_power_state(bp, PCI_D3hot);
7687 break;
7688 }
7689
7690 return 0;
7691 }
7692
7693 static netdev_features_t
7694 bnx2_fix_features(struct net_device *dev, netdev_features_t features)
7695 {
7696 struct bnx2 *bp = netdev_priv(dev);
7697
7698 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7699 features |= NETIF_F_HW_VLAN_RX;
7700
7701 return features;
7702 }
7703
7704 static int
7705 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7706 {
7707 struct bnx2 *bp = netdev_priv(dev);
7708
7709 /* TSO with VLAN tag won't work with current firmware */
7710 if (features & NETIF_F_HW_VLAN_TX)
7711 dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7712 else
7713 dev->vlan_features &= ~NETIF_F_ALL_TSO;
7714
7715 if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7716 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7717 netif_running(dev)) {
7718 bnx2_netif_stop(bp, false);
7719 dev->features = features;
7720 bnx2_set_rx_mode(dev);
7721 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7722 bnx2_netif_start(bp, false);
7723 return 1;
7724 }
7725
7726 return 0;
7727 }
7728
7729 static void bnx2_get_channels(struct net_device *dev,
7730 struct ethtool_channels *channels)
7731 {
7732 struct bnx2 *bp = netdev_priv(dev);
7733 u32 max_rx_rings = 1;
7734 u32 max_tx_rings = 1;
7735
7736 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7737 max_rx_rings = RX_MAX_RINGS;
7738 max_tx_rings = TX_MAX_RINGS;
7739 }
7740
7741 channels->max_rx = max_rx_rings;
7742 channels->max_tx = max_tx_rings;
7743 channels->max_other = 0;
7744 channels->max_combined = 0;
7745 channels->rx_count = bp->num_rx_rings;
7746 channels->tx_count = bp->num_tx_rings;
7747 channels->other_count = 0;
7748 channels->combined_count = 0;
7749 }
7750
7751 static int bnx2_set_channels(struct net_device *dev,
7752 struct ethtool_channels *channels)
7753 {
7754 struct bnx2 *bp = netdev_priv(dev);
7755 u32 max_rx_rings = 1;
7756 u32 max_tx_rings = 1;
7757 int rc = 0;
7758
7759 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7760 max_rx_rings = RX_MAX_RINGS;
7761 max_tx_rings = TX_MAX_RINGS;
7762 }
7763 if (channels->rx_count > max_rx_rings ||
7764 channels->tx_count > max_tx_rings)
7765 return -EINVAL;
7766
7767 bp->num_req_rx_rings = channels->rx_count;
7768 bp->num_req_tx_rings = channels->tx_count;
7769
7770 if (netif_running(dev))
7771 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7772 bp->tx_ring_size, true);
7773
7774 return rc;
7775 }
7776
7777 static const struct ethtool_ops bnx2_ethtool_ops = {
7778 .get_settings = bnx2_get_settings,
7779 .set_settings = bnx2_set_settings,
7780 .get_drvinfo = bnx2_get_drvinfo,
7781 .get_regs_len = bnx2_get_regs_len,
7782 .get_regs = bnx2_get_regs,
7783 .get_wol = bnx2_get_wol,
7784 .set_wol = bnx2_set_wol,
7785 .nway_reset = bnx2_nway_reset,
7786 .get_link = bnx2_get_link,
7787 .get_eeprom_len = bnx2_get_eeprom_len,
7788 .get_eeprom = bnx2_get_eeprom,
7789 .set_eeprom = bnx2_set_eeprom,
7790 .get_coalesce = bnx2_get_coalesce,
7791 .set_coalesce = bnx2_set_coalesce,
7792 .get_ringparam = bnx2_get_ringparam,
7793 .set_ringparam = bnx2_set_ringparam,
7794 .get_pauseparam = bnx2_get_pauseparam,
7795 .set_pauseparam = bnx2_set_pauseparam,
7796 .self_test = bnx2_self_test,
7797 .get_strings = bnx2_get_strings,
7798 .set_phys_id = bnx2_set_phys_id,
7799 .get_ethtool_stats = bnx2_get_ethtool_stats,
7800 .get_sset_count = bnx2_get_sset_count,
7801 .get_channels = bnx2_get_channels,
7802 .set_channels = bnx2_set_channels,
7803 };
7804
7805 /* Called with rtnl_lock */
7806 static int
7807 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7808 {
7809 struct mii_ioctl_data *data = if_mii(ifr);
7810 struct bnx2 *bp = netdev_priv(dev);
7811 int err;
7812
7813 switch(cmd) {
7814 case SIOCGMIIPHY:
7815 data->phy_id = bp->phy_addr;
7816
7817 /* fallthru */
7818 case SIOCGMIIREG: {
7819 u32 mii_regval;
7820
7821 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7822 return -EOPNOTSUPP;
7823
7824 if (!netif_running(dev))
7825 return -EAGAIN;
7826
7827 spin_lock_bh(&bp->phy_lock);
7828 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7829 spin_unlock_bh(&bp->phy_lock);
7830
7831 data->val_out = mii_regval;
7832
7833 return err;
7834 }
7835
7836 case SIOCSMIIREG:
7837 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7838 return -EOPNOTSUPP;
7839
7840 if (!netif_running(dev))
7841 return -EAGAIN;
7842
7843 spin_lock_bh(&bp->phy_lock);
7844 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7845 spin_unlock_bh(&bp->phy_lock);
7846
7847 return err;
7848
7849 default:
7850 /* do nothing */
7851 break;
7852 }
7853 return -EOPNOTSUPP;
7854 }
7855
7856 /* Called with rtnl_lock */
7857 static int
7858 bnx2_change_mac_addr(struct net_device *dev, void *p)
7859 {
7860 struct sockaddr *addr = p;
7861 struct bnx2 *bp = netdev_priv(dev);
7862
7863 if (!is_valid_ether_addr(addr->sa_data))
7864 return -EADDRNOTAVAIL;
7865
7866 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7867 if (netif_running(dev))
7868 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7869
7870 return 0;
7871 }
7872
7873 /* Called with rtnl_lock */
7874 static int
7875 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7876 {
7877 struct bnx2 *bp = netdev_priv(dev);
7878
7879 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7880 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7881 return -EINVAL;
7882
7883 dev->mtu = new_mtu;
7884 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7885 false);
7886 }
7887
7888 #ifdef CONFIG_NET_POLL_CONTROLLER
7889 static void
7890 poll_bnx2(struct net_device *dev)
7891 {
7892 struct bnx2 *bp = netdev_priv(dev);
7893 int i;
7894
7895 for (i = 0; i < bp->irq_nvecs; i++) {
7896 struct bnx2_irq *irq = &bp->irq_tbl[i];
7897
7898 disable_irq(irq->vector);
7899 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7900 enable_irq(irq->vector);
7901 }
7902 }
7903 #endif
7904
7905 static void
7906 bnx2_get_5709_media(struct bnx2 *bp)
7907 {
7908 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7909 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7910 u32 strap;
7911
7912 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7913 return;
7914 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7915 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7916 return;
7917 }
7918
7919 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7920 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7921 else
7922 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7923
7924 if (bp->func == 0) {
7925 switch (strap) {
7926 case 0x4:
7927 case 0x5:
7928 case 0x6:
7929 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7930 return;
7931 }
7932 } else {
7933 switch (strap) {
7934 case 0x1:
7935 case 0x2:
7936 case 0x4:
7937 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7938 return;
7939 }
7940 }
7941 }
7942
7943 static void
7944 bnx2_get_pci_speed(struct bnx2 *bp)
7945 {
7946 u32 reg;
7947
7948 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7949 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7950 u32 clkreg;
7951
7952 bp->flags |= BNX2_FLAG_PCIX;
7953
7954 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7955
7956 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7957 switch (clkreg) {
7958 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7959 bp->bus_speed_mhz = 133;
7960 break;
7961
7962 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7963 bp->bus_speed_mhz = 100;
7964 break;
7965
7966 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7967 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7968 bp->bus_speed_mhz = 66;
7969 break;
7970
7971 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7972 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7973 bp->bus_speed_mhz = 50;
7974 break;
7975
7976 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7977 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7978 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7979 bp->bus_speed_mhz = 33;
7980 break;
7981 }
7982 }
7983 else {
7984 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7985 bp->bus_speed_mhz = 66;
7986 else
7987 bp->bus_speed_mhz = 33;
7988 }
7989
7990 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7991 bp->flags |= BNX2_FLAG_PCI_32BIT;
7992
7993 }
7994
7995 static void
7996 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7997 {
7998 int rc, i, j;
7999 u8 *data;
8000 unsigned int block_end, rosize, len;
8001
8002 #define BNX2_VPD_NVRAM_OFFSET 0x300
8003 #define BNX2_VPD_LEN 128
8004 #define BNX2_MAX_VER_SLEN 30
8005
8006 data = kmalloc(256, GFP_KERNEL);
8007 if (!data)
8008 return;
8009
8010 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8011 BNX2_VPD_LEN);
8012 if (rc)
8013 goto vpd_done;
8014
8015 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8016 data[i] = data[i + BNX2_VPD_LEN + 3];
8017 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8018 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8019 data[i + 3] = data[i + BNX2_VPD_LEN];
8020 }
8021
8022 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8023 if (i < 0)
8024 goto vpd_done;
8025
8026 rosize = pci_vpd_lrdt_size(&data[i]);
8027 i += PCI_VPD_LRDT_TAG_SIZE;
8028 block_end = i + rosize;
8029
8030 if (block_end > BNX2_VPD_LEN)
8031 goto vpd_done;
8032
8033 j = pci_vpd_find_info_keyword(data, i, rosize,
8034 PCI_VPD_RO_KEYWORD_MFR_ID);
8035 if (j < 0)
8036 goto vpd_done;
8037
8038 len = pci_vpd_info_field_size(&data[j]);
8039
8040 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8041 if (j + len > block_end || len != 4 ||
8042 memcmp(&data[j], "1028", 4))
8043 goto vpd_done;
8044
8045 j = pci_vpd_find_info_keyword(data, i, rosize,
8046 PCI_VPD_RO_KEYWORD_VENDOR0);
8047 if (j < 0)
8048 goto vpd_done;
8049
8050 len = pci_vpd_info_field_size(&data[j]);
8051
8052 j += PCI_VPD_INFO_FLD_HDR_SIZE;
8053 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8054 goto vpd_done;
8055
8056 memcpy(bp->fw_version, &data[j], len);
8057 bp->fw_version[len] = ' ';
8058
8059 vpd_done:
8060 kfree(data);
8061 }
8062
8063 static int
8064 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8065 {
8066 struct bnx2 *bp;
8067 int rc, i, j;
8068 u32 reg;
8069 u64 dma_mask, persist_dma_mask;
8070 int err;
8071
8072 SET_NETDEV_DEV(dev, &pdev->dev);
8073 bp = netdev_priv(dev);
8074
8075 bp->flags = 0;
8076 bp->phy_flags = 0;
8077
8078 bp->temp_stats_blk =
8079 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8080
8081 if (bp->temp_stats_blk == NULL) {
8082 rc = -ENOMEM;
8083 goto err_out;
8084 }
8085
8086 /* enable device (incl. PCI PM wakeup), and bus-mastering */
8087 rc = pci_enable_device(pdev);
8088 if (rc) {
8089 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8090 goto err_out;
8091 }
8092
8093 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8094 dev_err(&pdev->dev,
8095 "Cannot find PCI device base address, aborting\n");
8096 rc = -ENODEV;
8097 goto err_out_disable;
8098 }
8099
8100 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8101 if (rc) {
8102 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8103 goto err_out_disable;
8104 }
8105
8106 pci_set_master(pdev);
8107
8108 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8109 if (bp->pm_cap == 0) {
8110 dev_err(&pdev->dev,
8111 "Cannot find power management capability, aborting\n");
8112 rc = -EIO;
8113 goto err_out_release;
8114 }
8115
8116 bp->dev = dev;
8117 bp->pdev = pdev;
8118
8119 spin_lock_init(&bp->phy_lock);
8120 spin_lock_init(&bp->indirect_lock);
8121 #ifdef BCM_CNIC
8122 mutex_init(&bp->cnic_lock);
8123 #endif
8124 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8125
8126 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8127 TX_MAX_TSS_RINGS + 1));
8128 if (!bp->regview) {
8129 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8130 rc = -ENOMEM;
8131 goto err_out_release;
8132 }
8133
8134 bnx2_set_power_state(bp, PCI_D0);
8135
8136 /* Configure byte swap and enable write to the reg_window registers.
8137 * Rely on the CPU to do target byte swapping on big endian systems.
8138 * The chip's target access swapping will not swap all accesses.
8139 */
8140 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8141 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8142 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8143
8144 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8145
8146 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8147 if (!pci_is_pcie(pdev)) {
8148 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8149 rc = -EIO;
8150 goto err_out_unmap;
8151 }
8152 bp->flags |= BNX2_FLAG_PCIE;
8153 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8154 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8155
8156 /* AER (Advanced Error Reporting) hooks */
8157 err = pci_enable_pcie_error_reporting(pdev);
8158 if (!err)
8159 bp->flags |= BNX2_FLAG_AER_ENABLED;
8160
8161 } else {
8162 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8163 if (bp->pcix_cap == 0) {
8164 dev_err(&pdev->dev,
8165 "Cannot find PCIX capability, aborting\n");
8166 rc = -EIO;
8167 goto err_out_unmap;
8168 }
8169 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8170 }
8171
8172 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8173 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8174 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8175 bp->flags |= BNX2_FLAG_MSIX_CAP;
8176 }
8177
8178 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8179 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8180 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8181 bp->flags |= BNX2_FLAG_MSI_CAP;
8182 }
8183
8184 /* 5708 cannot support DMA addresses > 40-bit. */
8185 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8186 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8187 else
8188 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8189
8190 /* Configure DMA attributes. */
8191 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8192 dev->features |= NETIF_F_HIGHDMA;
8193 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8194 if (rc) {
8195 dev_err(&pdev->dev,
8196 "pci_set_consistent_dma_mask failed, aborting\n");
8197 goto err_out_unmap;
8198 }
8199 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8200 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8201 goto err_out_unmap;
8202 }
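	/* Sketch only, never compiled: on later kernels (v3.13+) the
	 * two-step mask setup above collapses into
	 * dma_set_mask_and_coherent(), roughly as follows.
	 */
#if 0
	if (dma_set_mask_and_coherent(&pdev->dev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
	} else if ((rc = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}
#endif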
8203
8204 if (!(bp->flags & BNX2_FLAG_PCIE))
8205 bnx2_get_pci_speed(bp);
8206
8207 /* 5706A0 may falsely detect SERR and PERR. */
8208 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8209 reg = BNX2_RD(bp, PCI_COMMAND);
8210 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8211 BNX2_WR(bp, PCI_COMMAND, reg);
8212 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8213 !(bp->flags & BNX2_FLAG_PCIX)) {
8214 rc = -EPERM;
8215 dev_err(&pdev->dev,
8216 "5706 A1 can only be used in a PCIX bus, aborting\n");
8217 goto err_out_unmap;
8218 }
8219
8220 bnx2_init_nvram(bp);
8221
8222 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8223
8224 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8225 bp->func = 1;
8226
8227 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8228 BNX2_SHM_HDR_SIGNATURE_SIG) {
8229 u32 off = bp->func << 2;
8230
8231 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8232 } else
8233 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8234
8235 /* Get the permanent MAC address. First we need to make sure the
8236 * firmware is actually running.
8237 */
8238 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8239
8240 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8241 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8242 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8243 rc = -ENODEV;
8244 goto err_out_unmap;
8245 }
8246
8247 bnx2_read_vpd_fw_ver(bp);
8248
8249 j = strlen(bp->fw_version);
8250 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8251 for (i = 0; i < 3 && j < 24; i++) {
8252 u8 num, k, skip0;
8253
8254 if (i == 0) {
8255 bp->fw_version[j++] = 'b';
8256 bp->fw_version[j++] = 'c';
8257 bp->fw_version[j++] = ' ';
8258 }
8259 num = (u8) (reg >> (24 - (i * 8)));
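		/* Emit "num" (0-255) in decimal with leading zeros
		 * suppressed: k steps 100 -> 10 -> 1 and skip0 drops the
		 * leading zero digits, so e.g. reg = 0x06020100 yields
		 * "bc 6.2.1".
		 */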
8260 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8261 if (num >= k || !skip0 || k == 1) {
8262 bp->fw_version[j++] = (num / k) + '0';
8263 skip0 = 0;
8264 }
8265 }
8266 if (i != 2)
8267 bp->fw_version[j++] = '.';
8268 }
8269 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8270 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8271 bp->wol = 1;
8272
8273 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8274 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8275
8276 for (i = 0; i < 30; i++) {
8277 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8278 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8279 break;
8280 msleep(10);
8281 }
8282 }
8283 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8284 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8285 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8286 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8287 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8288
8289 if (j < 32)
8290 bp->fw_version[j++] = ' ';
8291 for (i = 0; i < 3 && j < 28; i++) {
8292 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8293 reg = be32_to_cpu(reg);
8294 memcpy(&bp->fw_version[j], &reg, 4);
8295 j += 4;
8296 }
8297 }
8298
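	/* The permanent MAC address lives in two shmem words:
	 * MAC_UPPER[15:0] holds bytes 0-1, MAC_LOWER[31:0] holds bytes 2-5.
	 */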
8299 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8300 bp->mac_addr[0] = (u8) (reg >> 8);
8301 bp->mac_addr[1] = (u8) reg;
8302
8303 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8304 bp->mac_addr[2] = (u8) (reg >> 24);
8305 bp->mac_addr[3] = (u8) (reg >> 16);
8306 bp->mac_addr[4] = (u8) (reg >> 8);
8307 bp->mac_addr[5] = (u8) reg;
8308
8309 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8310 bnx2_set_rx_ring_size(bp, 255);
8311
8312 bp->tx_quick_cons_trip_int = 2;
8313 bp->tx_quick_cons_trip = 20;
8314 bp->tx_ticks_int = 18;
8315 bp->tx_ticks = 80;
8316
8317 bp->rx_quick_cons_trip_int = 2;
8318 bp->rx_quick_cons_trip = 12;
8319 bp->rx_ticks_int = 18;
8320 bp->rx_ticks = 18;
8321
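	/* Statistics DMA defaults to once per second (1,000,000 usec),
	 * masked down to the bits the HC_STAT_TICKS field can hold.
	 */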
8322 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8323
8324 bp->current_interval = BNX2_TIMER_INTERVAL;
8325
8326 bp->phy_addr = 1;
8327
8328 /* Disable WOL support if we are running on a SERDES chip. */
8329 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8330 bnx2_get_5709_media(bp);
8331 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8332 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8333
8334 bp->phy_port = PORT_TP;
8335 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8336 bp->phy_port = PORT_FIBRE;
8337 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8338 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8339 bp->flags |= BNX2_FLAG_NO_WOL;
8340 bp->wol = 0;
8341 }
8342 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8343 /* Don't do parallel detect on this board because of
8344 * some board problems. The link will not go down
8345 * if we do parallel detect.
8346 */
8347 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8348 pdev->subsystem_device == 0x310c)
8349 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8350 } else {
8351 bp->phy_addr = 2;
8352 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8353 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8354 }
8355 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8356 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8357 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8358 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8359 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8360 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8361 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8362
8363 bnx2_init_fw_cap(bp);
8364
8365 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8366 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8367 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8368 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8369 bp->flags |= BNX2_FLAG_NO_WOL;
8370 bp->wol = 0;
8371 }
8372
8373 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8374 bp->tx_quick_cons_trip_int =
8375 bp->tx_quick_cons_trip;
8376 bp->tx_ticks_int = bp->tx_ticks;
8377 bp->rx_quick_cons_trip_int =
8378 bp->rx_quick_cons_trip;
8379 bp->rx_ticks_int = bp->rx_ticks;
8380 bp->comp_prod_trip_int = bp->comp_prod_trip;
8381 bp->com_ticks_int = bp->com_ticks;
8382 bp->cmd_ticks_int = bp->cmd_ticks;
8383 }
8384
8385 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8386 *
8387 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
8388 * with byte enables disabled on the unused 32-bit word. This is legal
8389 * but causes problems on the AMD 8132, which will eventually stop
8390 * responding.
8391 *
8392 * AMD believes this incompatibility is unique to the 5706, and
8393 * prefers to locally disable MSI rather than globally disabling it.
8394 */
8395 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8396 struct pci_dev *amd_8132 = NULL;
8397
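		/* pci_get_device() iterates every matching device and
		 * takes a reference on each one it returns; pci_dev_put()
		 * drops the reference still held when we break out early.
		 */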
8398 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8399 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8400 amd_8132))) {
8401
8402 if (amd_8132->revision >= 0x10 &&
8403 amd_8132->revision <= 0x13) {
8404 disable_msi = 1;
8405 pci_dev_put(amd_8132);
8406 break;
8407 }
8408 }
8409 }
8410
8411 bnx2_set_default_link(bp);
8412 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8413
8414 init_timer(&bp->timer);
8415 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8416 bp->timer.data = (unsigned long) bp;
8417 bp->timer.function = bnx2_timer;
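	/* Sketch only, never compiled: with the v4.15+ timer API (and
	 * assuming bnx2_timer were converted to take a struct timer_list *),
	 * the three assignments above would become:
	 */
#if 0
	timer_setup(&bp->timer, bnx2_timer, 0);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
#endif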
8418
8419 #ifdef BCM_CNIC
8420 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8421 bp->cnic_eth_dev.max_iscsi_conn =
8422 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8423 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8424 bp->cnic_probe = bnx2_cnic_probe;
8425 #endif
8426 pci_save_state(pdev);
8427
8428 return 0;
8429
8430 err_out_unmap:
8431 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8432 pci_disable_pcie_error_reporting(pdev);
8433 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8434 }
8435
8436 pci_iounmap(pdev, bp->regview);
8437 bp->regview = NULL;
8438
8439 err_out_release:
8440 pci_release_regions(pdev);
8441
8442 err_out_disable:
8443 pci_disable_device(pdev);
8444 pci_set_drvdata(pdev, NULL);
8445
8446 err_out:
8447 return rc;
8448 }
8449
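/* Format the bus type into str, e.g. "PCI Express" or
 * "PCI-X 64-bit 133MHz"; the caller supplies a 40-byte buffer.
 */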
8450 static char *
8451 bnx2_bus_string(struct bnx2 *bp, char *str)
8452 {
8453 char *s = str;
8454
8455 if (bp->flags & BNX2_FLAG_PCIE) {
8456 s += sprintf(s, "PCI Express");
8457 } else {
8458 s += sprintf(s, "PCI");
8459 if (bp->flags & BNX2_FLAG_PCIX)
8460 s += sprintf(s, "-X");
8461 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8462 s += sprintf(s, " 32-bit");
8463 else
8464 s += sprintf(s, " 64-bit");
8465 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8466 }
8467 return str;
8468 }
8469
8470 static void
8471 bnx2_del_napi(struct bnx2 *bp)
8472 {
8473 int i;
8474
8475 for (i = 0; i < bp->irq_nvecs; i++)
8476 netif_napi_del(&bp->bnx2_napi[i].napi);
8477 }
8478
8479 static void
8480 bnx2_init_napi(struct bnx2 *bp)
8481 {
8482 int i;
8483
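	/* Vector 0 polls the default status block (bnx2_poll, which also
	 * handles link events); any extra MSI-X vectors poll only their
	 * own ring (bnx2_poll_msix). 64 is the default NAPI weight.
	 */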
8484 for (i = 0; i < bp->irq_nvecs; i++) {
8485 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8486 int (*poll)(struct napi_struct *, int);
8487
8488 if (i == 0)
8489 poll = bnx2_poll;
8490 else
8491 poll = bnx2_poll_msix;
8492
8493 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8494 bnapi->bp = bp;
8495 }
8496 }
8497
8498 static const struct net_device_ops bnx2_netdev_ops = {
8499 .ndo_open = bnx2_open,
8500 .ndo_start_xmit = bnx2_start_xmit,
8501 .ndo_stop = bnx2_close,
8502 .ndo_get_stats64 = bnx2_get_stats64,
8503 .ndo_set_rx_mode = bnx2_set_rx_mode,
8504 .ndo_do_ioctl = bnx2_ioctl,
8505 .ndo_validate_addr = eth_validate_addr,
8506 .ndo_set_mac_address = bnx2_change_mac_addr,
8507 .ndo_change_mtu = bnx2_change_mtu,
8508 .ndo_fix_features = bnx2_fix_features,
8509 .ndo_set_features = bnx2_set_features,
8510 .ndo_tx_timeout = bnx2_tx_timeout,
8511 #ifdef CONFIG_NET_POLL_CONTROLLER
8512 .ndo_poll_controller = poll_bnx2,
8513 #endif
8514 };
8515
8516 static int
8517 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8518 {
8519 static int version_printed = 0;
8520 struct net_device *dev;
8521 struct bnx2 *bp;
8522 int rc;
8523 char str[40];
8524
8525 if (version_printed++ == 0)
8526 pr_info("%s", version);
8527
8528 /* dev zeroed in alloc_etherdev_mq */
8529 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8530 if (!dev)
8531 return -ENOMEM;
8532
8533 rc = bnx2_init_board(pdev, dev);
8534 if (rc < 0)
8535 goto err_free;
8536
8537 dev->netdev_ops = &bnx2_netdev_ops;
8538 dev->watchdog_timeo = TX_TIMEOUT;
8539 dev->ethtool_ops = &bnx2_ethtool_ops;
8540
8541 bp = netdev_priv(dev);
8542
8543 pci_set_drvdata(pdev, dev);
8544
8545 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8546 memcpy(dev->perm_addr, bp->mac_addr, ETH_ALEN);
8547
8548 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8549 NETIF_F_TSO | NETIF_F_TSO_ECN |
8550 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8551
8552 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8553 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8554
8555 dev->vlan_features = dev->hw_features;
8556 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8557 dev->features |= dev->hw_features;
8558 dev->priv_flags |= IFF_UNICAST_FLT;
8559
8560 if ((rc = register_netdev(dev))) {
8561 dev_err(&pdev->dev, "Cannot register net device\n");
8562 goto error;
8563 }
8564
8565 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8566 "node addr %pM\n", board_info[ent->driver_data].name,
8567 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8568 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8569 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8570 pdev->irq, dev->dev_addr);
8571
8572 return 0;
8573
8574 error:
8575 pci_iounmap(pdev, bp->regview);
8576 pci_release_regions(pdev);
8577 pci_disable_device(pdev);
8578 pci_set_drvdata(pdev, NULL);
8579 err_free:
8580 free_netdev(dev);
8581 return rc;
8582 }
8583
8584 static void
8585 bnx2_remove_one(struct pci_dev *pdev)
8586 {
8587 struct net_device *dev = pci_get_drvdata(pdev);
8588 struct bnx2 *bp = netdev_priv(dev);
8589
8590 unregister_netdev(dev);
8591
8592 del_timer_sync(&bp->timer);
8593 cancel_work_sync(&bp->reset_task);
8594
8595 pci_iounmap(bp->pdev, bp->regview);
8596
8597 kfree(bp->temp_stats_blk);
8598
8599 if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8600 pci_disable_pcie_error_reporting(pdev);
8601 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8602 }
8603
8604 bnx2_release_firmware(bp);
8605
8606 free_netdev(dev);
8607
8608 pci_release_regions(pdev);
8609 pci_disable_device(pdev);
8610 pci_set_drvdata(pdev, NULL);
8611 }
8612
8613 static int
8614 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8615 {
8616 struct net_device *dev = pci_get_drvdata(pdev);
8617 struct bnx2 *bp = netdev_priv(dev);
8618
8619 /* PCI register 4 needs to be saved whether netif_running() is true
8620 * or not. The MSI address and data need to be saved only if MSI is
8621 * in use and netif_running() is true.
8622 */
8623 pci_save_state(pdev);
8624 if (!netif_running(dev))
8625 return 0;
8626
8627 cancel_work_sync(&bp->reset_task);
8628 bnx2_netif_stop(bp, true);
8629 netif_device_detach(dev);
8630 del_timer_sync(&bp->timer);
8631 bnx2_shutdown_chip(bp);
8632 bnx2_free_skbs(bp);
8633 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8634 return 0;
8635 }
8636
8637 static int
8638 bnx2_resume(struct pci_dev *pdev)
8639 {
8640 struct net_device *dev = pci_get_drvdata(pdev);
8641 struct bnx2 *bp = netdev_priv(dev);
8642
8643 pci_restore_state(pdev);
8644 if (!netif_running(dev))
8645 return 0;
8646
8647 bnx2_set_power_state(bp, PCI_D0);
8648 netif_device_attach(dev);
8649 bnx2_init_nic(bp, 1);
8650 bnx2_netif_start(bp, true);
8651 return 0;
8652 }
8653
8654 /**
8655 * bnx2_io_error_detected - called when PCI error is detected
8656 * @pdev: Pointer to PCI device
8657 * @state: The current PCI connection state
8658 *
8659 * This function is called after a PCI bus error affecting
8660 * this device has been detected.
8661 */
8662 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8663 pci_channel_state_t state)
8664 {
8665 struct net_device *dev = pci_get_drvdata(pdev);
8666 struct bnx2 *bp = netdev_priv(dev);
8667
8668 rtnl_lock();
8669 netif_device_detach(dev);
8670
8671 if (state == pci_channel_io_perm_failure) {
8672 rtnl_unlock();
8673 return PCI_ERS_RESULT_DISCONNECT;
8674 }
8675
8676 if (netif_running(dev)) {
8677 bnx2_netif_stop(bp, true);
8678 del_timer_sync(&bp->timer);
8679 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8680 }
8681
8682 pci_disable_device(pdev);
8683 rtnl_unlock();
8684
8685 /* Request a slot reset. */
8686 return PCI_ERS_RESULT_NEED_RESET;
8687 }
8688
8689 /**
8690 * bnx2_io_slot_reset - called after the PCI bus has been reset.
8691 * @pdev: Pointer to PCI device
8692 *
8693 * Restart the card from scratch, as if from a cold-boot.
8694 */
8695 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8696 {
8697 struct net_device *dev = pci_get_drvdata(pdev);
8698 struct bnx2 *bp = netdev_priv(dev);
8699 pci_ers_result_t result;
8700 int err;
8701
8702 rtnl_lock();
8703 if (pci_enable_device(pdev)) {
8704 dev_err(&pdev->dev,
8705 "Cannot re-enable PCI device after reset\n");
8706 result = PCI_ERS_RESULT_DISCONNECT;
8707 } else {
8708 pci_set_master(pdev);
8709 pci_restore_state(pdev);
8710 pci_save_state(pdev);
8711
8712 if (netif_running(dev)) {
8713 bnx2_set_power_state(bp, PCI_D0);
8714 bnx2_init_nic(bp, 1);
8715 }
8716 result = PCI_ERS_RESULT_RECOVERED;
8717 }
8718 rtnl_unlock();
8719
8720 if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8721 return result;
8722
8723 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8724 if (err) {
8725 dev_err(&pdev->dev,
8726 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8727 err); /* non-fatal, continue */
8728 }
8729
8730 return result;
8731 }
8732
8733 /**
8734 * bnx2_io_resume - called when traffic can start flowing again.
8735 * @pdev: Pointer to PCI device
8736 *
8737 * This callback is called when the error recovery driver tells us that
8738 * it's OK to resume normal operation.
8739 */
8740 static void bnx2_io_resume(struct pci_dev *pdev)
8741 {
8742 struct net_device *dev = pci_get_drvdata(pdev);
8743 struct bnx2 *bp = netdev_priv(dev);
8744
8745 rtnl_lock();
8746 if (netif_running(dev))
8747 bnx2_netif_start(bp, true);
8748
8749 netif_device_attach(dev);
8750 rtnl_unlock();
8751 }
8752
8753 static const struct pci_error_handlers bnx2_err_handler = {
8754 .error_detected = bnx2_io_error_detected,
8755 .slot_reset = bnx2_io_slot_reset,
8756 .resume = bnx2_io_resume,
8757 };
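/* AER recovery sequence: the PCI core invokes .error_detected first;
 * if that returns PCI_ERS_RESULT_NEED_RESET it resets the slot and
 * calls .slot_reset, then .resume once traffic may flow again.
 */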
8758
8759 static struct pci_driver bnx2_pci_driver = {
8760 .name = DRV_MODULE_NAME,
8761 .id_table = bnx2_pci_tbl,
8762 .probe = bnx2_init_one,
8763 .remove = bnx2_remove_one,
8764 .suspend = bnx2_suspend,
8765 .resume = bnx2_resume,
8766 .err_handler = &bnx2_err_handler,
8767 };
8768
8769 static int __init bnx2_init(void)
8770 {
8771 return pci_register_driver(&bnx2_pci_driver);
8772 }
8773
8774 static void __exit bnx2_cleanup(void)
8775 {
8776 pci_unregister_driver(&bnx2_pci_driver);
8777 }
8778
8779 module_init(bnx2_init);
8780 module_exit(bnx2_cleanup);
8781