bnx2x: Unlimited Tx interrupt work
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.48.102"
#define DRV_MODULE_RELDATE	"2009/02/12"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
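/* The above are standard module parameters, set at module load time,
 * e.g. "modprobe bnx2x int_mode=2 disable_tpa=1" forces MSI interrupt
 * mode and turns the TPA (LRO) feature off.
 */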

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

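/* Note on DMAE: the write/read helpers below build a dmae_command that the
 * chip's DMA engine executes (PCI <-> GRC copies), then poll a write-back
 * completion word in the slowpath area until it reaches DMAE_COMP_VAL.
 * Before the DMAE block is initialized (!bp->dmae_ready) they fall back to
 * slow indirect register accesses through PCI config space.
 */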
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

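/* Each status block update is acknowledged to the IGU by writing an
 * igu_ack_register word to the host coalescing command register: it carries
 * the status block id, the storm, the new consumer index, and op/update
 * flags that control whether the interrupt stays disabled or is re-enabled.
 */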
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

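/* A transmitted packet occupies a chain of BDs: the first (mapped) BD,
 * optionally a parse BD and a TSO split header BD that carry no DMA
 * mapping, and one mapped BD per page fragment.  tx_bd->nbd counts them
 * all, so the routine below can walk and unmap the whole chain.
 */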
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

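/* Ring accounting sketch (hypothetical numbers): prod and cons are
 * free-running u16 counters, so SUB_S16() yields the BDs in flight even
 * across a wrap.  With tx_ring_size 4096, NUM_TX_RINGS 16, prod 5000 and
 * cons 4000: used = 1000 + 16 = 1016, leaving 4096 - 1016 = 3080 BDs
 * available.  The "next page" BDs are always counted as used so they are
 * never handed out.
 */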
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

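/* The SGE ring is tracked with a bitmap (fp->sge_mask, u64 elements): a
 * cleared bit marks a page the FW has consumed.  The producer may only be
 * advanced over fully-consumed (all-zero) elements, which is what
 * bnx2x_update_sge_prod() below walks and re-fills.
 */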
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

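/* TPA (LRO) keeps one aggregation bin per queue in fp->tpa_pool.  "start"
 * parks the packet's skb in the bin and recycles the bin's pre-allocated
 * empty skb onto the ring; "stop" (further below) attaches the aggregated
 * SGE pages to the parked skb and passes it up the stack.
 */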
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

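/* Main receive path: walk the completion queue (RCQ).  Each CQE is either
 * a slowpath event (handed to bnx2x_sp_event), a TPA start/end marker, or
 * a regular packet; small packets are copied into a fresh skb so the
 * original buffer can be reused, while larger ones are unmapped and the
 * ring slot is refilled with a new skb.
 */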
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

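/* One MSI-X vector per fastpath queue: the ISR only disables further
 * status block interrupts for its queue (IGU_INT_DISABLE) and schedules
 * NAPI, which does the actual Rx/Tx work in softirq context.
 */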
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
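
/*
 * Illustrative usage sketch (not in the original driver): callers bracket
 * accesses to shared resources with the acquire/release pair, e.g.
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... touch the shared GPIO block ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * as done by bnx2x_set_gpio() and bnx2x_set_spio() below.
 */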

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
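
/*
 * Illustrative note (not in the original driver): the swap logic above
 * XORs the caller's port with 1 only when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero. E.g. on a swapped board, GPIO 2
 * of port 1 maps to gpio_port 0 and gpio_shift 2; unswapped, the same
 * pin would use gpio_shift 2 + MISC_REGISTERS_GPIO_PORT_SHIFT.
 */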

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
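
/*
 * Worked example (illustrative, not in the original driver): on a 10G
 * link line_speed == 10000 Mbps, so r_param == 1250 bytes/usec and,
 * per the comment above, t_fair == 1000 usec; rs_threshold then comes
 * out as RS_PERIODIC_TIMEOUT_USEC * 1250 * 1.25 bytes.
 */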

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
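
/*
 * Illustrative note (not in the original driver): the min/max bandwidth
 * fields in func_mf_config appear to be encoded in units of 100 Mbps,
 * hence the "* 100" above; e.g. a raw MIN_BW field of 25 yields
 * vn_min_rate == 2500 Mbps, and the per-period byte quota for a
 * 10000 Mbps vn_max_rate is 10000 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes.
 */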

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
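
/*
 * Illustrative usage sketch (not in the original driver): ramrods are
 * posted by handing the 64-bit payload to bnx2x_sp_post() as two 32-bit
 * halves, e.g. the statistics query later in this file does
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *		      ((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[0], 0);
 *
 * with common == 0 for a per-connection (non-common) command.
 */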

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
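
/*
 * Illustrative note (not in the original driver): the return value is a
 * bitmap of which default status block indices changed - bit 0 for the
 * attention block and bits 1..4 for the c/u/x/t storm blocks - so a
 * return of 0x3 means both the attention bits and the cstorm index
 * advanced since the last poll.
 */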

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;
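
	/*
	 * Illustrative example (not in the original driver): a bit is
	 * "asserted" when it is newly raised and not yet acked or tracked,
	 * e.g. attn_bits=0x1, attn_ack=0x0, attn_state=0x0 gives
	 * asserted=0x1; once the hardware later clears the bit while ack
	 * and state still show it (attn_bits=0x0, attn_ack=0x1,
	 * attn_state=0x1), the same bit shows up in deasserted instead.
	 */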

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
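
/*
 * Worked example (illustrative, not in the original driver): adding
 * {a_hi=0, a_lo=1} to s = {s_hi=1, s_lo=0xffffffff} wraps s_lo to 0;
 * since the new s_lo is smaller than a_lo a carry is detected, so s_hi
 * becomes 2 and the pair now encodes the 64-bit value
 * 0x00000002_00000000.
 */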

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
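
/*
 * Worked example (illustrative, not in the original driver): computing
 * {m_hi=1, m_lo=0} - {s_hi=0, s_lo=1} takes the underflow branch: d_hi
 * is provisionally 1, the 'loan' drops it to 0 and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e. 0x1_00000000 - 1.
 * A negative true difference saturates to {0, 0} rather than wrapping.
 */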

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
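
/*
 * Illustrative note (not in the original driver): the counters are laid
 * out as a {hi, lo} pair of u32s, so bnx2x_hilo(&stats->foo_hi) folds
 * them into one value. On 32-bit kernels only the low word fits in a
 * long, so the high word is deliberately dropped there.
 */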

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
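
/*
 * Illustrative note (not in the original driver): the "loader" command
 * above appears to copy the first of the commands queued in the slowpath
 * buffer into DMAE command memory at slot loader_idx + 1 and kick it via
 * the GO register named by comp_addr; the queued commands themselves
 * signal GO registers on completion (see bnx2x_port_stats_init() below),
 * so the whole batch accumulated under bp->executer_idx runs from this
 * single bnx2x_post_dmae() call.
 */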

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3272
3273static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3274{
3275 struct dmae_command *dmae;
34f80b04 3276 int port = BP_PORT(bp);
bb2a0f7a 3277 int vn = BP_E1HVN(bp);
a2fbb9ea 3278 u32 opcode;
bb2a0f7a 3279 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3280 u32 mac_addr;
bb2a0f7a
YG
3281 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3282
3283 /* sanity */
3284 if (!bp->link_vars.link_up || !bp->port.pmf) {
3285 BNX2X_ERR("BUG!\n");
3286 return;
3287 }
a2fbb9ea
ET
3288
3289 bp->executer_idx = 0;
bb2a0f7a
YG
3290
3291 /* MCP */
3292 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3293 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3294 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3295#ifdef __BIG_ENDIAN
bb2a0f7a 3296 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3297#else
bb2a0f7a 3298 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3299#endif
bb2a0f7a
YG
3300 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3301 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3302
bb2a0f7a 3303 if (bp->port.port_stx) {
a2fbb9ea
ET
3304
3305 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3306 dmae->opcode = opcode;
bb2a0f7a
YG
3307 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3308 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3309 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3310 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3311 dmae->len = sizeof(struct host_port_stats) >> 2;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3314 dmae->comp_val = 1;
a2fbb9ea
ET
3315 }
3316
bb2a0f7a
YG
3317 if (bp->func_stx) {
3318
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3322 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3323 dmae->dst_addr_lo = bp->func_stx >> 2;
3324 dmae->dst_addr_hi = 0;
3325 dmae->len = sizeof(struct host_func_stats) >> 2;
3326 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3327 dmae->comp_addr_hi = 0;
3328 dmae->comp_val = 1;
a2fbb9ea
ET
3329 }
3330
bb2a0f7a 3331 /* MAC */
a2fbb9ea
ET
3332 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3333 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3334 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3335#ifdef __BIG_ENDIAN
3336 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3337#else
3338 DMAE_CMD_ENDIANITY_DW_SWAP |
3339#endif
bb2a0f7a
YG
3340 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3341 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3342
c18487ee 3343 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3344
3345 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3346 NIG_REG_INGRESS_BMAC0_MEM);
3347
3348 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3349 BIGMAC_REGISTER_TX_STAT_GTBYT */
3350 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3351 dmae->opcode = opcode;
3352 dmae->src_addr_lo = (mac_addr +
3353 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3354 dmae->src_addr_hi = 0;
3355 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3357 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3358 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3359 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360 dmae->comp_addr_hi = 0;
3361 dmae->comp_val = 1;
3362
3363 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3364 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3365 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366 dmae->opcode = opcode;
3367 dmae->src_addr_lo = (mac_addr +
3368 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3369 dmae->src_addr_hi = 0;
3370 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3371 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3372 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3373 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3374 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3375 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3376 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3377 dmae->comp_addr_hi = 0;
3378 dmae->comp_val = 1;
3379
c18487ee 3380 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3381
3382 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3383
3384 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3385 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3386 dmae->opcode = opcode;
3387 dmae->src_addr_lo = (mac_addr +
3388 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3389 dmae->src_addr_hi = 0;
3390 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3391 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3392 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3393 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3394 dmae->comp_addr_hi = 0;
3395 dmae->comp_val = 1;
3396
3397 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3398 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399 dmae->opcode = opcode;
3400 dmae->src_addr_lo = (mac_addr +
3401 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3402 dmae->src_addr_hi = 0;
3403 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3404 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3405 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3406 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3407 dmae->len = 1;
3408 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3409 dmae->comp_addr_hi = 0;
3410 dmae->comp_val = 1;
3411
3412 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3413 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3414 dmae->opcode = opcode;
3415 dmae->src_addr_lo = (mac_addr +
3416 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3417 dmae->src_addr_hi = 0;
3418 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3419 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3420 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3421 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3422 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3423 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3424 dmae->comp_addr_hi = 0;
3425 dmae->comp_val = 1;
3426 }
3427
3428 /* NIG */
bb2a0f7a
YG
3429 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3430 dmae->opcode = opcode;
3431 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3432 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3433 dmae->src_addr_hi = 0;
3434 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3435 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3436 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3437 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3438 dmae->comp_addr_hi = 0;
3439 dmae->comp_val = 1;
3440
3441 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3442 dmae->opcode = opcode;
3443 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3444 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3445 dmae->src_addr_hi = 0;
3446 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3447 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3448 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3449 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450 dmae->len = (2*sizeof(u32)) >> 2;
3451 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3452 dmae->comp_addr_hi = 0;
3453 dmae->comp_val = 1;
3454
3455 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3456 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3457 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3458 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3459#ifdef __BIG_ENDIAN
3460 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3461#else
3462 DMAE_CMD_ENDIANITY_DW_SWAP |
3463#endif
3464 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3465 (vn << DMAE_CMD_E1HVN_SHIFT));
3466 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3467 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3468 dmae->src_addr_hi = 0;
3469 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3470 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3471 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3472 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473 dmae->len = (2*sizeof(u32)) >> 2;
3474 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3475 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3476 dmae->comp_val = DMAE_COMP_VAL;
3477
3478 *stats_comp = 0;
3479}
3480
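/* Build the single DMAE command that copies this function's
 * host_func_stats block from host memory to its func_stx area in
 * device memory; completion is detected by polling *stats_comp for
 * DMAE_COMP_VAL */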
3481static void bnx2x_func_stats_init(struct bnx2x *bp)
3482{
3483 struct dmae_command *dmae = &bp->stats_dmae;
3484 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3485
3486 /* sanity */
3487 if (!bp->func_stx) {
3488 BNX2X_ERR("BUG!\n");
3489 return;
3490 }
3491
3492 bp->executer_idx = 0;
3493 memset(dmae, 0, sizeof(struct dmae_command));
3494
3495 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3496 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3497 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3498#ifdef __BIG_ENDIAN
3499 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3500#else
3501 DMAE_CMD_ENDIANITY_DW_SWAP |
3502#endif
3503 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3504 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3505 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3506 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3507 dmae->dst_addr_lo = bp->func_stx >> 2;
3508 dmae->dst_addr_hi = 0;
3509 dmae->len = sizeof(struct host_func_stats) >> 2;
3510 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3511 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3512 dmae->comp_val = DMAE_COMP_VAL;
3513
3514 *stats_comp = 0;
3515}
3516
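/* (Re)start statistics: only the PMF programs the port-wide DMAE
 * chain, every function programs its own per-function copy; the
 * hardware and storm stats requests are then posted */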
3517static void bnx2x_stats_start(struct bnx2x *bp)
3518{
3519 if (bp->port.pmf)
3520 bnx2x_port_stats_init(bp);
3521
3522 else if (bp->func_stx)
3523 bnx2x_func_stats_init(bp);
3524
3525 bnx2x_hw_stats_post(bp);
3526 bnx2x_storm_stats_post(bp);
3527}
3528
3529static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3530{
3531 bnx2x_stats_comp(bp);
3532 bnx2x_stats_pmf_update(bp);
3533 bnx2x_stats_start(bp);
3534}
3535
3536static void bnx2x_stats_restart(struct bnx2x *bp)
3537{
3538 bnx2x_stats_comp(bp);
3539 bnx2x_stats_start(bp);
3540}
3541
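/* Fold the DMAed BigMAC counters into their 64-bit mirrors in
 * host_port_stats; UPDATE_STAT64() extends each hardware counter by
 * its delta since the previous readout */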
3542static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3543{
3544 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3545 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3546 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3547 struct {
3548 u32 lo;
3549 u32 hi;
3550 } diff;
3551
3552 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3553 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3554 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3555 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3556 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3557 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3558 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3559 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3560 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3561 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3562 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3563 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3564 UPDATE_STAT64(tx_stat_gt127,
3565 tx_stat_etherstatspkts65octetsto127octets);
3566 UPDATE_STAT64(tx_stat_gt255,
3567 tx_stat_etherstatspkts128octetsto255octets);
3568 UPDATE_STAT64(tx_stat_gt511,
3569 tx_stat_etherstatspkts256octetsto511octets);
3570 UPDATE_STAT64(tx_stat_gt1023,
3571 tx_stat_etherstatspkts512octetsto1023octets);
3572 UPDATE_STAT64(tx_stat_gt1518,
3573 tx_stat_etherstatspkts1024octetsto1522octets);
3574 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3575 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3576 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3577 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3578 UPDATE_STAT64(tx_stat_gterr,
3579 tx_stat_dot3statsinternalmactransmiterrors);
3580 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3581
3582 estats->pause_frames_received_hi =
3583 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3584 estats->pause_frames_received_lo =
3585 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3586
3587 estats->pause_frames_sent_hi =
3588 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3589 estats->pause_frames_sent_lo =
3590 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3591}
3592
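/* Same folding for the EMAC: UPDATE_EXTEND_STAT() widens each 32-bit
 * hardware counter into its 64-bit mirror in host_port_stats */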
3593static void bnx2x_emac_stats_update(struct bnx2x *bp)
3594{
3595 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3596 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3597 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3598
3599 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3600 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3601 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3602 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3603 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3604 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3605 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3606 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3607 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3608 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3609 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3610 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3611 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3612 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3613 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3614 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3615 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3616 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3617 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3618 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3619 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3620 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3621 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3622 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3623 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3624 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3626 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3627 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3628 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3630
3631 estats->pause_frames_received_hi =
3632 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3633 estats->pause_frames_received_lo =
3634 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3635 ADD_64(estats->pause_frames_received_hi,
3636 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3637 estats->pause_frames_received_lo,
3638 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3639
3640 estats->pause_frames_sent_hi =
3641 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3642 estats->pause_frames_sent_lo =
3643 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3644 ADD_64(estats->pause_frames_sent_hi,
3645 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3646 estats->pause_frames_sent_lo,
3647 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3648}
3649
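/* PMF only: merge the active MAC's counters and the NIG discard
 * counters into the port stats, and complain if the NIG timer
 * counter read from shmem has advanced */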
3650static int bnx2x_hw_stats_update(struct bnx2x *bp)
3651{
3652 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3653 struct nig_stats *old = &(bp->port.old_nig_stats);
3654 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3655 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3656 struct {
3657 u32 lo;
3658 u32 hi;
3659 } diff;
3660 u32 nig_timer_max;
3661
3662 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3663 bnx2x_bmac_stats_update(bp);
3664
3665 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3666 bnx2x_emac_stats_update(bp);
3667
3668 else { /* unreached */
3669 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3670 return -1;
3671 }
3672
3673 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3674 new->brb_discard - old->brb_discard);
3675 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3676 new->brb_truncate - old->brb_truncate);
3677
3678 UPDATE_STAT64_NIG(egress_mac_pkt0,
3679 etherstatspkts1024octetsto1522octets);
3680 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3681
3682 memcpy(old, new, sizeof(struct nig_stats));
3683
3684 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3685 sizeof(struct mac_stx));
3686 estats->brb_drop_hi = pstats->brb_drop_hi;
3687 estats->brb_drop_lo = pstats->brb_drop_lo;
3688
3689 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3690
3691 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3692 if (nig_timer_max != estats->nig_timer_max) {
3693 estats->nig_timer_max = nig_timer_max;
3694 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3695 }
3696
3697 return 0;
3698}
3699
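/* Pull the per-client statistics published by the x/t/u storms,
 * verify each storm's counter against bp->stats_counter to make sure
 * the snapshot is complete, then aggregate the queue counters into
 * the per-function and ethernet totals */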
3700static int bnx2x_storm_stats_update(struct bnx2x *bp)
3701{
3702 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3703 struct tstorm_per_port_stats *tport =
3704 &stats->tstorm_common.port_statistics;
3705 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3706 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3707 int i;
3708
3709 memset(&(fstats->total_bytes_received_hi), 0,
3710 sizeof(struct host_func_stats) - 2*sizeof(u32));
3711 estats->error_bytes_received_hi = 0;
3712 estats->error_bytes_received_lo = 0;
3713 estats->etherstatsoverrsizepkts_hi = 0;
3714 estats->etherstatsoverrsizepkts_lo = 0;
3715 estats->no_buff_discard_hi = 0;
3716 estats->no_buff_discard_lo = 0;
3717
3718 for_each_queue(bp, i) {
3719 struct bnx2x_fastpath *fp = &bp->fp[i];
3720 int cl_id = fp->cl_id;
3721 struct tstorm_per_client_stats *tclient =
3722 &stats->tstorm_common.client_statistics[cl_id];
3723 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3724 struct ustorm_per_client_stats *uclient =
3725 &stats->ustorm_common.client_statistics[cl_id];
3726 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3727 struct xstorm_per_client_stats *xclient =
3728 &stats->xstorm_common.client_statistics[cl_id];
3729 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3730 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3731 u32 diff;
3732
3733 /* are storm stats valid? */
3734 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3735 bp->stats_counter) {
3736 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3737 " xstorm counter (%d) != stats_counter (%d)\n",
3738 i, xclient->stats_counter, bp->stats_counter);
3739 return -1;
3740 }
3741 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3742 bp->stats_counter) {
3743 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3744 " tstorm counter (%d) != stats_counter (%d)\n",
3745 i, tclient->stats_counter, bp->stats_counter);
3746 return -2;
3747 }
3748 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3749 bp->stats_counter) {
3750 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3751 " ustorm counter (%d) != stats_counter (%d)\n",
3752 i, uclient->stats_counter, bp->stats_counter);
3753 return -4;
3754 }
3755
3756 qstats->total_bytes_received_hi =
3757 qstats->valid_bytes_received_hi =
3758 le32_to_cpu(tclient->total_rcv_bytes.hi);
3759 qstats->total_bytes_received_lo =
3760 qstats->valid_bytes_received_lo =
3761 le32_to_cpu(tclient->total_rcv_bytes.lo);
3762
3763 qstats->error_bytes_received_hi =
3764 le32_to_cpu(tclient->rcv_error_bytes.hi);
3765 qstats->error_bytes_received_lo =
3766 le32_to_cpu(tclient->rcv_error_bytes.lo);
3767
3768 ADD_64(qstats->total_bytes_received_hi,
3769 qstats->error_bytes_received_hi,
3770 qstats->total_bytes_received_lo,
3771 qstats->error_bytes_received_lo);
3772
3773 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3774 total_unicast_packets_received);
3775 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3776 total_multicast_packets_received);
3777 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3778 total_broadcast_packets_received);
3779 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3780 etherstatsoverrsizepkts);
3781 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3782
3783 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3784 total_unicast_packets_received);
3785 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3786 total_multicast_packets_received);
3787 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3788 total_broadcast_packets_received);
3789 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3790 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3791 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3792
3793 qstats->total_bytes_transmitted_hi =
3794 le32_to_cpu(xclient->total_sent_bytes.hi);
3795 qstats->total_bytes_transmitted_lo =
3796 le32_to_cpu(xclient->total_sent_bytes.lo);
3797
3798 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3799 total_unicast_packets_transmitted);
3800 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3801 total_multicast_packets_transmitted);
3802 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3803 total_broadcast_packets_transmitted);
3804
3805 old_tclient->checksum_discard = tclient->checksum_discard;
3806 old_tclient->ttl0_discard = tclient->ttl0_discard;
3807
3808 ADD_64(fstats->total_bytes_received_hi,
3809 qstats->total_bytes_received_hi,
3810 fstats->total_bytes_received_lo,
3811 qstats->total_bytes_received_lo);
3812 ADD_64(fstats->total_bytes_transmitted_hi,
3813 qstats->total_bytes_transmitted_hi,
3814 fstats->total_bytes_transmitted_lo,
3815 qstats->total_bytes_transmitted_lo);
3816 ADD_64(fstats->total_unicast_packets_received_hi,
3817 qstats->total_unicast_packets_received_hi,
3818 fstats->total_unicast_packets_received_lo,
3819 qstats->total_unicast_packets_received_lo);
3820 ADD_64(fstats->total_multicast_packets_received_hi,
3821 qstats->total_multicast_packets_received_hi,
3822 fstats->total_multicast_packets_received_lo,
3823 qstats->total_multicast_packets_received_lo);
3824 ADD_64(fstats->total_broadcast_packets_received_hi,
3825 qstats->total_broadcast_packets_received_hi,
3826 fstats->total_broadcast_packets_received_lo,
3827 qstats->total_broadcast_packets_received_lo);
3828 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3829 qstats->total_unicast_packets_transmitted_hi,
3830 fstats->total_unicast_packets_transmitted_lo,
3831 qstats->total_unicast_packets_transmitted_lo);
3832 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3833 qstats->total_multicast_packets_transmitted_hi,
3834 fstats->total_multicast_packets_transmitted_lo,
3835 qstats->total_multicast_packets_transmitted_lo);
3836 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3837 qstats->total_broadcast_packets_transmitted_hi,
3838 fstats->total_broadcast_packets_transmitted_lo,
3839 qstats->total_broadcast_packets_transmitted_lo);
3840 ADD_64(fstats->valid_bytes_received_hi,
3841 qstats->valid_bytes_received_hi,
3842 fstats->valid_bytes_received_lo,
3843 qstats->valid_bytes_received_lo);
3844
3845 ADD_64(estats->error_bytes_received_hi,
3846 qstats->error_bytes_received_hi,
3847 estats->error_bytes_received_lo,
3848 qstats->error_bytes_received_lo);
3849 ADD_64(estats->etherstatsoverrsizepkts_hi,
3850 qstats->etherstatsoverrsizepkts_hi,
3851 estats->etherstatsoverrsizepkts_lo,
3852 qstats->etherstatsoverrsizepkts_lo);
3853 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3854 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3855 }
3856
3857 ADD_64(fstats->total_bytes_received_hi,
3858 estats->rx_stat_ifhcinbadoctets_hi,
3859 fstats->total_bytes_received_lo,
3860 estats->rx_stat_ifhcinbadoctets_lo);
3861
3862 memcpy(estats, &(fstats->total_bytes_received_hi),
3863 sizeof(struct host_func_stats) - 2*sizeof(u32));
3864
3865 ADD_64(estats->etherstatsoverrsizepkts_hi,
3866 estats->rx_stat_dot3statsframestoolong_hi,
3867 estats->etherstatsoverrsizepkts_lo,
3868 estats->rx_stat_dot3statsframestoolong_lo);
3869 ADD_64(estats->error_bytes_received_hi,
3870 estats->rx_stat_ifhcinbadoctets_hi,
3871 estats->error_bytes_received_lo,
3872 estats->rx_stat_ifhcinbadoctets_lo);
3873
3874 if (bp->port.pmf) {
3875 estats->mac_filter_discard =
3876 le32_to_cpu(tport->mac_filter_discard);
3877 estats->xxoverflow_discard =
3878 le32_to_cpu(tport->xxoverflow_discard);
3879 estats->brb_truncate_discard =
3880 le32_to_cpu(tport->brb_truncate_discard);
3881 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3882 }
3883
3884 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3885
de832a55
EG
3886 bp->stats_pending = 0;
3887
3888 return 0;
3889}
3890
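/* Translate the accumulated driver statistics into the generic
 * struct net_device_stats counters reported to the stack */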
3891static void bnx2x_net_stats_update(struct bnx2x *bp)
3892{
3893 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3894 struct net_device_stats *nstats = &bp->dev->stats;
3895 int i;
3896
3897 nstats->rx_packets =
3898 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3899 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3900 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3901
3902 nstats->tx_packets =
3903 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3904 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3905 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3906
3907 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3908
3909 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3910
3911 nstats->rx_dropped = estats->mac_discard;
3912 for_each_queue(bp, i)
3913 nstats->rx_dropped +=
3914 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3915
3916 nstats->tx_dropped = 0;
3917
3918 nstats->multicast =
3919 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3920
3921 nstats->collisions =
3922 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3923
3924 nstats->rx_length_errors =
3925 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3926 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3927 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3928 bnx2x_hilo(&estats->brb_truncate_hi);
3929 nstats->rx_crc_errors =
3930 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3931 nstats->rx_frame_errors =
3932 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3933 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3934 nstats->rx_missed_errors = estats->xxoverflow_discard;
3935
3936 nstats->rx_errors = nstats->rx_length_errors +
3937 nstats->rx_over_errors +
3938 nstats->rx_crc_errors +
3939 nstats->rx_frame_errors +
3940 nstats->rx_fifo_errors +
3941 nstats->rx_missed_errors;
3942
3943 nstats->tx_aborted_errors =
3944 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3945 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3946 nstats->tx_carrier_errors =
3947 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3948 nstats->tx_fifo_errors = 0;
3949 nstats->tx_heartbeat_errors = 0;
3950 nstats->tx_window_errors = 0;
3951
3952 nstats->tx_errors = nstats->tx_aborted_errors +
3953 nstats->tx_carrier_errors +
3954 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3955}
3956
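/* Sum the software-only per-queue counters (Tx queue stalls, skb
 * allocation failures, checksum errors) into the global stats */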
3957static void bnx2x_drv_stats_update(struct bnx2x *bp)
3958{
3959 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3960 int i;
3961
3962 estats->driver_xoff = 0;
3963 estats->rx_err_discard_pkt = 0;
3964 estats->rx_skb_alloc_failed = 0;
3965 estats->hw_csum_err = 0;
3966 for_each_queue(bp, i) {
3967 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3968
3969 estats->driver_xoff += qstats->driver_xoff;
3970 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3971 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3972 estats->hw_csum_err += qstats->hw_csum_err;
3973 }
3974}
3975
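/* UPDATE event handler: runs only once the previous DMAE pass has
 * completed (*stats_comp == DMAE_COMP_VAL), refreshes all counters
 * and immediately posts the next hardware and storm requests */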
3976static void bnx2x_stats_update(struct bnx2x *bp)
3977{
3978 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3979
3980 if (*stats_comp != DMAE_COMP_VAL)
3981 return;
3982
3983 if (bp->port.pmf)
3984 bnx2x_hw_stats_update(bp);
3985
3986 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3987 BNX2X_ERR("storm stats were not updated for 3 times\n");
3988 bnx2x_panic();
3989 return;
3990 }
3991
3992 bnx2x_net_stats_update(bp);
3993 bnx2x_drv_stats_update(bp);
3994
3995 if (bp->msglevel & NETIF_MSG_TIMER) {
3996 struct tstorm_per_client_stats *old_tclient =
3997 &bp->fp->old_tclient;
3998 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3999 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4000 struct net_device_stats *nstats = &bp->dev->stats;
4001 int i;
4002
4003 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4004 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4005 " tx pkt (%lx)\n",
4006 bnx2x_tx_avail(bp->fp),
4007 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4008 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4009 " rx pkt (%lx)\n",
4010 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4011 bp->fp->rx_comp_cons),
4012 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4013 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4014 "brb truncate %u\n",
4015 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4016 qstats->driver_xoff,
4017 estats->brb_drop_lo, estats->brb_truncate_lo);
4018 printk(KERN_DEBUG "tstats: checksum_discard %u "
4019 "packets_too_big_discard %lu no_buff_discard %lu "
4020 "mac_discard %u mac_filter_discard %u "
4021 "xxovrflow_discard %u brb_truncate_discard %u "
4022 "ttl0_discard %u\n",
4023 le32_to_cpu(old_tclient->checksum_discard),
4024 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4025 bnx2x_hilo(&qstats->no_buff_discard_hi),
4026 estats->mac_discard, estats->mac_filter_discard,
4027 estats->xxoverflow_discard, estats->brb_truncate_discard,
4028 le32_to_cpu(old_tclient->ttl0_discard));
4029
4030 for_each_queue(bp, i) {
4031 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4032 bnx2x_fp(bp, i, tx_pkt),
4033 bnx2x_fp(bp, i, rx_pkt),
4034 bnx2x_fp(bp, i, rx_calls));
4035 }
4036 }
4037
4038 bnx2x_hw_stats_post(bp);
4039 bnx2x_storm_stats_post(bp);
4040}
4041
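/* Flush the final port and function statistics out to device memory
 * before the statistics state machine goes to DISABLED */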
4042static void bnx2x_port_stats_stop(struct bnx2x *bp)
4043{
4044 struct dmae_command *dmae;
4045 u32 opcode;
4046 int loader_idx = PMF_DMAE_C(bp);
4047 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4048
4049 bp->executer_idx = 0;
4050
4051 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4052 DMAE_CMD_C_ENABLE |
4053 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4054#ifdef __BIG_ENDIAN
4055 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4056#else
4057 DMAE_CMD_ENDIANITY_DW_SWAP |
4058#endif
4059 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4060 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4061
4062 if (bp->port.port_stx) {
4063
4064 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4065 if (bp->func_stx)
4066 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4067 else
4068 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4069 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4070 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4071 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4072 dmae->dst_addr_hi = 0;
4073 dmae->len = sizeof(struct host_port_stats) >> 2;
4074 if (bp->func_stx) {
4075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076 dmae->comp_addr_hi = 0;
4077 dmae->comp_val = 1;
4078 } else {
4079 dmae->comp_addr_lo =
4080 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4081 dmae->comp_addr_hi =
4082 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4083 dmae->comp_val = DMAE_COMP_VAL;
4084
4085 *stats_comp = 0;
4086 }
4087 }
4088
4089 if (bp->func_stx) {
4090
4091 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4092 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4093 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4094 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4095 dmae->dst_addr_lo = bp->func_stx >> 2;
4096 dmae->dst_addr_hi = 0;
4097 dmae->len = sizeof(struct host_func_stats) >> 2;
4098 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4099 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4100 dmae->comp_val = DMAE_COMP_VAL;
4101
4102 *stats_comp = 0;
4103 }
4104}
4105
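/* STOP event handler: wait for any pending DMAE, do one last counter
 * refresh and, on the PMF, push the final port stats to the chip */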
4106static void bnx2x_stats_stop(struct bnx2x *bp)
4107{
4108 int update = 0;
4109
4110 bnx2x_stats_comp(bp);
4111
4112 if (bp->port.pmf)
4113 update = (bnx2x_hw_stats_update(bp) == 0);
4114
4115 update |= (bnx2x_storm_stats_update(bp) == 0);
4116
4117 if (update) {
4118 bnx2x_net_stats_update(bp);
4119
4120 if (bp->port.pmf)
4121 bnx2x_port_stats_stop(bp);
4122
4123 bnx2x_hw_stats_post(bp);
4124 bnx2x_stats_comp(bp);
4125 }
4126}
4127
4128static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4129{
4130}
4131
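/* Statistics state machine: indexed by [current state][event], each
 * entry gives the action to execute and the next state to enter */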
4132static const struct {
4133 void (*action)(struct bnx2x *bp);
4134 enum bnx2x_stats_state next_state;
4135} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4136/* state event */
4137{
4138/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4139/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4140/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4141/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4142},
4143{
4144/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4145/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4146/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4147/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4148}
4149};
4150
4151static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4152{
4153 enum bnx2x_stats_state state = bp->stats_state;
4154
4155 bnx2x_stats_stm[state][event].action(bp);
4156 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4157
4158 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4159 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4160 state, event, bp->stats_state);
4161}
4162
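/* Periodic driver timer: services the rings when the "poll" module
 * parameter is set, maintains the driver/MCP heartbeat pulse and
 * triggers a statistics update while the device is up */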
4163static void bnx2x_timer(unsigned long data)
4164{
4165 struct bnx2x *bp = (struct bnx2x *) data;
4166
4167 if (!netif_running(bp->dev))
4168 return;
4169
4170 if (atomic_read(&bp->intr_sem) != 0)
4171 goto timer_restart;
4172
4173 if (poll) {
4174 struct bnx2x_fastpath *fp = &bp->fp[0];
4175 int rc;
4176
4177 bnx2x_tx_int(fp);
4178 rc = bnx2x_rx_int(fp, 1000);
4179 }
4180
4181 if (!BP_NOMCP(bp)) {
4182 int func = BP_FUNC(bp);
4183 u32 drv_pulse;
4184 u32 mcp_pulse;
4185
4186 ++bp->fw_drv_pulse_wr_seq;
4187 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4188 /* TBD - add SYSTEM_TIME */
4189 drv_pulse = bp->fw_drv_pulse_wr_seq;
4190 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4191
4192 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4193 MCP_PULSE_SEQ_MASK);
4194 /* The delta between driver pulse and mcp response
4195 * should be 1 (before mcp response) or 0 (after mcp response)
4196 */
4197 if ((drv_pulse != mcp_pulse) &&
4198 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4199 /* someone lost a heartbeat... */
4200 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4201 drv_pulse, mcp_pulse);
4202 }
4203 }
4204
4205 if ((bp->state == BNX2X_STATE_OPEN) ||
4206 (bp->state == BNX2X_STATE_DISABLED))
4207 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4208
4209timer_restart:
4210 mod_timer(&bp->timer, jiffies + bp->current_interval);
4211}
4212
4213/* end of Statistics */
4214
4215/* nic init */
4216
4217/*
4218 * nic init service functions
4219 */
4220
4221static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4222{
4223 int port = BP_PORT(bp);
4224
4225 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4226 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4227 sizeof(struct ustorm_status_block)/4);
4228 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4229 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4230 sizeof(struct cstorm_status_block)/4);
4231}
4232
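/* Bind a fastpath status block to the HC: point its USTORM and
 * CSTORM sections at the host DMA address, stamp the owning function
 * and start with all HC indices disabled */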
4233static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4234 dma_addr_t mapping, int sb_id)
4235{
4236 int port = BP_PORT(bp);
4237 int func = BP_FUNC(bp);
4238 int index;
4239 u64 section;
4240
4241 /* USTORM */
4242 section = ((u64)mapping) + offsetof(struct host_status_block,
4243 u_status_block);
4244 sb->u_status_block.status_block_id = sb_id;
4245
4246 REG_WR(bp, BAR_USTRORM_INTMEM +
4247 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4248 REG_WR(bp, BAR_USTRORM_INTMEM +
4249 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4250 U64_HI(section));
4251 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4252 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4253
4254 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4255 REG_WR16(bp, BAR_USTRORM_INTMEM +
4256 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4257
4258 /* CSTORM */
4259 section = ((u64)mapping) + offsetof(struct host_status_block,
4260 c_status_block);
4261 sb->c_status_block.status_block_id = sb_id;
4262
4263 REG_WR(bp, BAR_CSTRORM_INTMEM +
4264 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4265 REG_WR(bp, BAR_CSTRORM_INTMEM +
4266 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4267 U64_HI(section));
4268 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4269 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4270
4271 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4272 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4273 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4274
4275 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4276}
4277
4278static void bnx2x_zero_def_sb(struct bnx2x *bp)
4279{
4280 int func = BP_FUNC(bp);
4281
4282 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4283 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4284 sizeof(struct ustorm_def_status_block)/4);
4285 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4286 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4287 sizeof(struct cstorm_def_status_block)/4);
4288 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4289 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4290 sizeof(struct xstorm_def_status_block)/4);
4291 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4292 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293 sizeof(struct tstorm_def_status_block)/4);
4294}
4295
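/* The default status block carries the attention signals and the
 * slowpath indices of all four storms; program each of its sections
 * and enable it in the IGU */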
4296static void bnx2x_init_def_sb(struct bnx2x *bp,
4297 struct host_def_status_block *def_sb,
4298 dma_addr_t mapping, int sb_id)
4299{
4300 int port = BP_PORT(bp);
4301 int func = BP_FUNC(bp);
4302 int index, val, reg_offset;
4303 u64 section;
4304
4305 /* ATTN */
4306 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4307 atten_status_block);
4308 def_sb->atten_status_block.status_block_id = sb_id;
4309
4310 bp->attn_state = 0;
4311
4312 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4313 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4314
4315 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4316 bp->attn_group[index].sig[0] = REG_RD(bp,
4317 reg_offset + 0x10*index);
4318 bp->attn_group[index].sig[1] = REG_RD(bp,
4319 reg_offset + 0x4 + 0x10*index);
4320 bp->attn_group[index].sig[2] = REG_RD(bp,
4321 reg_offset + 0x8 + 0x10*index);
4322 bp->attn_group[index].sig[3] = REG_RD(bp,
4323 reg_offset + 0xc + 0x10*index);
4324 }
4325
4326 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4327 HC_REG_ATTN_MSG0_ADDR_L);
4328
4329 REG_WR(bp, reg_offset, U64_LO(section));
4330 REG_WR(bp, reg_offset + 4, U64_HI(section));
4331
4332 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4333
4334 val = REG_RD(bp, reg_offset);
4335 val |= sb_id;
4336 REG_WR(bp, reg_offset, val);
4337
4338 /* USTORM */
4339 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4340 u_def_status_block);
4341 def_sb->u_def_status_block.status_block_id = sb_id;
4342
4343 REG_WR(bp, BAR_USTRORM_INTMEM +
4344 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4345 REG_WR(bp, BAR_USTRORM_INTMEM +
4346 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4347 U64_HI(section));
4348 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4349 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4350
4351 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4352 REG_WR16(bp, BAR_USTRORM_INTMEM +
4353 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4354
4355 /* CSTORM */
4356 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4357 c_def_status_block);
4358 def_sb->c_def_status_block.status_block_id = sb_id;
4359
4360 REG_WR(bp, BAR_CSTRORM_INTMEM +
4361 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4362 REG_WR(bp, BAR_CSTRORM_INTMEM +
4363 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4364 U64_HI(section));
4365 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4366 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4367
4368 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4369 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4370 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4371
4372 /* TSTORM */
4373 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4374 t_def_status_block);
4375 def_sb->t_def_status_block.status_block_id = sb_id;
4376
4377 REG_WR(bp, BAR_TSTRORM_INTMEM +
4378 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4379 REG_WR(bp, BAR_TSTRORM_INTMEM +
4380 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4381 U64_HI(section));
4382 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4383 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4384
4385 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4386 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4387 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4388
4389 /* XSTORM */
4390 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4391 x_def_status_block);
4392 def_sb->x_def_status_block.status_block_id = sb_id;
4393
4394 REG_WR(bp, BAR_XSTRORM_INTMEM +
4395 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4396 REG_WR(bp, BAR_XSTRORM_INTMEM +
4397 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4398 U64_HI(section));
4399 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4400 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4401
4402 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4403 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4404 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4405
4406 bp->stats_pending = 0;
4407 bp->set_mac_pending = 0;
4408
4409 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4410}
4411
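/* Program the HC timeout of each queue's Rx and Tx completion-queue
 * indices; the configured tick values are divided by 12 before being
 * written, and a zero value marks the index as disabled */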
4412static void bnx2x_update_coalesce(struct bnx2x *bp)
4413{
4414 int port = BP_PORT(bp);
4415 int i;
4416
4417 for_each_queue(bp, i) {
4418 int sb_id = bp->fp[i].sb_id;
4419
4420 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4421 REG_WR8(bp, BAR_USTRORM_INTMEM +
4422 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4423 U_SB_ETH_RX_CQ_INDEX),
4424 bp->rx_ticks/12);
4425 REG_WR16(bp, BAR_USTRORM_INTMEM +
4426 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4427 U_SB_ETH_RX_CQ_INDEX),
4428 bp->rx_ticks ? 0 : 1);
4429
4430 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4431 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4432 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4433 C_SB_ETH_TX_CQ_INDEX),
4434 bp->tx_ticks/12);
4435 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4436 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4437 C_SB_ETH_TX_CQ_INDEX),
4438 bp->tx_ticks ? 0 : 1);
4439 }
4440}
4441
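/* Free the TPA pool skbs of one fastpath; buffers still in the
 * TPA_START state are unmapped from the PCI device first */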
4442static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4443 struct bnx2x_fastpath *fp, int last)
4444{
4445 int i;
4446
4447 for (i = 0; i < last; i++) {
4448 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4449 struct sk_buff *skb = rx_buf->skb;
4450
4451 if (skb == NULL) {
4452 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4453 continue;
4454 }
4455
4456 if (fp->tpa_state[i] == BNX2X_TPA_START)
4457 pci_unmap_single(bp->pdev,
4458 pci_unmap_addr(rx_buf, mapping),
4459 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4460
4461 dev_kfree_skb(skb);
4462 rx_buf->skb = NULL;
4463 }
4464}
4465
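/* Build every Rx ring: size the buffers for the current MTU,
 * pre-allocate the TPA pool when aggregation is enabled, chain the
 * SGE/BD/CQ "next page" elements, fill the rings with skbs and
 * finally publish the initial producers to the chip */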
4466static void bnx2x_init_rx_rings(struct bnx2x *bp)
4467{
4468 int func = BP_FUNC(bp);
4469 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4470 ETH_MAX_AGGREGATION_QUEUES_E1H;
4471 u16 ring_prod, cqe_ring_prod;
4472 int i, j;
4473
4474 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4475 DP(NETIF_MSG_IFUP,
4476 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4477
4478 if (bp->flags & TPA_ENABLE_FLAG) {
4479
4480 for_each_rx_queue(bp, j) {
4481 struct bnx2x_fastpath *fp = &bp->fp[j];
4482
4483 for (i = 0; i < max_agg_queues; i++) {
4484 fp->tpa_pool[i].skb =
4485 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4486 if (!fp->tpa_pool[i].skb) {
4487 BNX2X_ERR("Failed to allocate TPA "
4488 "skb pool for queue[%d] - "
4489 "disabling TPA on this "
4490 "queue!\n", j);
4491 bnx2x_free_tpa_pool(bp, fp, i);
4492 fp->disable_tpa = 1;
4493 break;
4494 }
4495 pci_unmap_addr_set((struct sw_rx_bd *)
4496 &bp->fp->tpa_pool[i],
4497 mapping, 0);
4498 fp->tpa_state[i] = BNX2X_TPA_STOP;
4499 }
4500 }
4501 }
4502
4503 for_each_rx_queue(bp, j) {
4504 struct bnx2x_fastpath *fp = &bp->fp[j];
4505
4506 fp->rx_bd_cons = 0;
4507 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4508 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4509
4510 /* "next page" elements initialization */
4511 /* SGE ring */
4512 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4513 struct eth_rx_sge *sge;
4514
4515 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4516 sge->addr_hi =
4517 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4518 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4519 sge->addr_lo =
4520 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4521 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 }
4523
4524 bnx2x_init_sge_ring_bit_mask(fp);
4525
4526 /* RX BD ring */
4527 for (i = 1; i <= NUM_RX_RINGS; i++) {
4528 struct eth_rx_bd *rx_bd;
4529
4530 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4531 rx_bd->addr_hi =
4532 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4533 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4534 rx_bd->addr_lo =
4535 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4536 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4537 }
4538
4539 /* CQ ring */
4540 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4541 struct eth_rx_cqe_next_page *nextpg;
4542
4543 nextpg = (struct eth_rx_cqe_next_page *)
4544 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4545 nextpg->addr_hi =
4546 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4547 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4548 nextpg->addr_lo =
4549 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4550 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4551 }
4552
4553 /* Allocate SGEs and initialize the ring elements */
4554 for (i = 0, ring_prod = 0;
4555 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4556
4557 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4558 BNX2X_ERR("was only able to allocate "
4559 "%d rx sges\n", i);
4560 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4561 /* Cleanup already allocated elements */
4562 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4563 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4564 fp->disable_tpa = 1;
4565 ring_prod = 0;
4566 break;
4567 }
4568 ring_prod = NEXT_SGE_IDX(ring_prod);
4569 }
4570 fp->rx_sge_prod = ring_prod;
4571
4572 /* Allocate BDs and initialize BD ring */
4573 fp->rx_comp_cons = 0;
4574 cqe_ring_prod = ring_prod = 0;
4575 for (i = 0; i < bp->rx_ring_size; i++) {
4576 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4577 BNX2X_ERR("was only able to allocate "
4578 "%d rx skbs on queue[%d]\n", i, j);
4579 fp->eth_q_stats.rx_skb_alloc_failed++;
4580 break;
4581 }
4582 ring_prod = NEXT_RX_IDX(ring_prod);
4583 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4584 WARN_ON(ring_prod <= i);
4585 }
4586
4587 fp->rx_bd_prod = ring_prod;
4588 /* must not have more available CQEs than BDs */
4589 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4590 cqe_ring_prod);
4591 fp->rx_pkt = fp->rx_calls = 0;
4592
4593 /* Warning!
4594 * this write will generate an interrupt (to the TSTORM)
4595 * and must only be done after the chip has been initialized
4596 */
4597 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4598 fp->rx_sge_prod);
4599 if (j != 0)
4600 continue;
4601
4602 REG_WR(bp, BAR_USTRORM_INTMEM +
4603 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4604 U64_LO(fp->rx_comp_mapping));
4605 REG_WR(bp, BAR_USTRORM_INTMEM +
4606 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4607 U64_HI(fp->rx_comp_mapping));
4608 }
4609}
4610
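/* Chain the "next page" descriptors of every Tx ring and reset the
 * producer/consumer indices so each ring starts out empty */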
4611static void bnx2x_init_tx_ring(struct bnx2x *bp)
4612{
4613 int i, j;
4614
4615 for_each_tx_queue(bp, j) {
4616 struct bnx2x_fastpath *fp = &bp->fp[j];
4617
4618 for (i = 1; i <= NUM_TX_RINGS; i++) {
4619 struct eth_tx_bd *tx_bd =
4620 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4621
4622 tx_bd->addr_hi =
4623 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4624 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4625 tx_bd->addr_lo =
4626 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4627 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4628 }
4629
4630 fp->tx_pkt_prod = 0;
4631 fp->tx_pkt_cons = 0;
4632 fp->tx_bd_prod = 0;
4633 fp->tx_bd_cons = 0;
4634 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4635 fp->tx_pkt = 0;
4636 }
4637}
4638
4639static void bnx2x_init_sp_ring(struct bnx2x *bp)
4640{
4641 int func = BP_FUNC(bp);
4642
4643 spin_lock_init(&bp->spq_lock);
4644
4645 bp->spq_left = MAX_SPQ_PENDING;
4646 bp->spq_prod_idx = 0;
4647 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4648 bp->spq_prod_bd = bp->spq;
4649 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4650
4651 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4652 U64_LO(bp->spq_mapping));
4653 REG_WR(bp,
4654 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4655 U64_HI(bp->spq_mapping));
4656
4657 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4658 bp->spq_prod_idx);
4659}
4660
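/* Fill the per-connection ethernet context: the USTORM part
 * describes the Rx BD/SGE rings and buffer sizes, the XSTORM part
 * the Tx BD ring and doorbell data, and the CSTORM part the Tx
 * completion index of the queue's status block */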
4661static void bnx2x_init_context(struct bnx2x *bp)
4662{
4663 int i;
4664
4665 for_each_queue(bp, i) {
4666 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4667 struct bnx2x_fastpath *fp = &bp->fp[i];
4668 u8 cl_id = fp->cl_id;
4669 u8 sb_id = fp->sb_id;
4670
4671 context->ustorm_st_context.common.sb_index_numbers =
4672 BNX2X_RX_SB_INDEX_NUM;
4673 context->ustorm_st_context.common.clientId = cl_id;
4674 context->ustorm_st_context.common.status_block_id = sb_id;
4675 context->ustorm_st_context.common.flags =
4676 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4677 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4678 context->ustorm_st_context.common.statistics_counter_id =
4679 cl_id;
4680 context->ustorm_st_context.common.mc_alignment_log_size =
4681 BNX2X_RX_ALIGN_SHIFT;
4682 context->ustorm_st_context.common.bd_buff_size =
4683 bp->rx_buf_size;
4684 context->ustorm_st_context.common.bd_page_base_hi =
4685 U64_HI(fp->rx_desc_mapping);
4686 context->ustorm_st_context.common.bd_page_base_lo =
4687 U64_LO(fp->rx_desc_mapping);
4688 if (!fp->disable_tpa) {
4689 context->ustorm_st_context.common.flags |=
4690 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4691 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4692 context->ustorm_st_context.common.sge_buff_size =
4693 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4694 (u32)0xffff);
4695 context->ustorm_st_context.common.sge_page_base_hi =
4696 U64_HI(fp->rx_sge_mapping);
4697 context->ustorm_st_context.common.sge_page_base_lo =
4698 U64_LO(fp->rx_sge_mapping);
4699 }
4700
4701 context->ustorm_ag_context.cdu_usage =
4702 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4703 CDU_REGION_NUMBER_UCM_AG,
4704 ETH_CONNECTION_TYPE);
4705
4706 context->xstorm_st_context.tx_bd_page_base_hi =
4707 U64_HI(fp->tx_desc_mapping);
4708 context->xstorm_st_context.tx_bd_page_base_lo =
4709 U64_LO(fp->tx_desc_mapping);
4710 context->xstorm_st_context.db_data_addr_hi =
4711 U64_HI(fp->tx_prods_mapping);
4712 context->xstorm_st_context.db_data_addr_lo =
4713 U64_LO(fp->tx_prods_mapping);
4714 context->xstorm_st_context.statistics_data = (cl_id |
4715 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4716 context->cstorm_st_context.sb_index_number =
4717 C_SB_ETH_TX_CQ_INDEX;
4718 context->cstorm_st_context.status_block_id = sb_id;
4719
4720 context->xstorm_ag_context.cdu_reserved =
4721 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4722 CDU_REGION_NUMBER_XCM_AG,
4723 ETH_CONNECTION_TYPE);
4724 }
4725}
4726
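/* Populate the TSTORM RSS indirection table; each entry maps to a
 * client id, spreading flows round-robin over the Rx queues */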
4727static void bnx2x_init_ind_table(struct bnx2x *bp)
4728{
4729 int func = BP_FUNC(bp);
4730 int i;
4731
4732 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4733 return;
4734
4735 DP(NETIF_MSG_IFUP,
4736 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4737 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4738 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4739 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4740 bp->fp->cl_id + (i % bp->num_rx_queues));
4741}
4742
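/* Push the per-client Rx configuration (MTU, VLAN removal, SGE/TPA
 * limits, statistics counter id) to the TSTORM for every queue */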
4743static void bnx2x_set_client_config(struct bnx2x *bp)
4744{
4745 struct tstorm_eth_client_config tstorm_client = {0};
4746 int port = BP_PORT(bp);
4747 int i;
4748
4749 tstorm_client.mtu = bp->dev->mtu;
4750 tstorm_client.config_flags =
4751 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4752 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4753#ifdef BCM_VLAN
4754 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4755 tstorm_client.config_flags |=
4756 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4757 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4758 }
4759#endif
4760
4761 if (bp->flags & TPA_ENABLE_FLAG) {
4762 tstorm_client.max_sges_for_packet =
4763 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4764 tstorm_client.max_sges_for_packet =
4765 ((tstorm_client.max_sges_for_packet +
4766 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4767 PAGES_PER_SGE_SHIFT;
4768
4769 tstorm_client.config_flags |=
4770 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4771 }
4772
4773 for_each_queue(bp, i) {
4774 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4775
4776 REG_WR(bp, BAR_TSTRORM_INTMEM +
4777 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4778 ((u32 *)&tstorm_client)[0]);
4779 REG_WR(bp, BAR_TSTRORM_INTMEM +
4780 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4781 ((u32 *)&tstorm_client)[1]);
4782 }
4783
4784 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4785 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4786}
4787
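/* Translate bp->rx_mode into the TSTORM MAC filter masks: each mode
 * sets the drop-all/accept-all bits for this function's L_ID and the
 * resulting table is written out dword by dword */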
4788static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4789{
4790 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4791 int mode = bp->rx_mode;
4792 int mask = (1 << BP_L_ID(bp));
4793 int func = BP_FUNC(bp);
4794 int i;
4795
4796 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4797
4798 switch (mode) {
4799 case BNX2X_RX_MODE_NONE: /* no Rx */
4800 tstorm_mac_filter.ucast_drop_all = mask;
4801 tstorm_mac_filter.mcast_drop_all = mask;
4802 tstorm_mac_filter.bcast_drop_all = mask;
4803 break;
4804
4805 case BNX2X_RX_MODE_NORMAL:
4806 tstorm_mac_filter.bcast_accept_all = mask;
4807 break;
4808
4809 case BNX2X_RX_MODE_ALLMULTI:
4810 tstorm_mac_filter.mcast_accept_all = mask;
4811 tstorm_mac_filter.bcast_accept_all = mask;
4812 break;
4813
4814 case BNX2X_RX_MODE_PROMISC:
4815 tstorm_mac_filter.ucast_accept_all = mask;
4816 tstorm_mac_filter.mcast_accept_all = mask;
4817 tstorm_mac_filter.bcast_accept_all = mask;
4818 break;
4819
4820 default:
4821 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4822 break;
4823 }
4824
4825 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4826 REG_WR(bp, BAR_TSTRORM_INTMEM +
4827 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4828 ((u32 *)&tstorm_mac_filter)[i]);
4829
4830/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4831 ((u32 *)&tstorm_mac_filter)[i]); */
4832 }
4833
4834 if (mode != BNX2X_RX_MODE_NONE)
4835 bnx2x_set_client_config(bp);
4836}
4837
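/* Chip-common internal memory init: advertise TPA support to the
 * TSTORM and zero the USTORM aggregation data area by hand */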
4838static void bnx2x_init_internal_common(struct bnx2x *bp)
4839{
4840 int i;
4841
4842 if (bp->flags & TPA_ENABLE_FLAG) {
4843 struct tstorm_eth_tpa_exist tpa = {0};
4844
4845 tpa.tpa_exist = 1;
4846
4847 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4848 ((u32 *)&tpa)[0]);
4849 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4850 ((u32 *)&tpa)[1]);
4851 }
4852
4853 /* Zero this manually as its initialization is
4854 currently missing in the initTool */
4855 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4856 REG_WR(bp, BAR_USTRORM_INTMEM +
4857 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4858}
4859
4860static void bnx2x_init_internal_port(struct bnx2x *bp)
4861{
4862 int port = BP_PORT(bp);
4863
4864 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4865 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4866 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868}
4869
4870/* Calculates the sum of vn_min_rates.
4871 It's needed for further normalizing of the min_rates.
4872 Returns:
4873 sum of vn_min_rates.
4874 or
4875 0 - if all the min_rates are 0.
4876 In the latter case the fairness algorithm should be deactivated.
4877 If not all min_rates are zero then those that are zeroes will be set to 1.
4878 */
4879static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4880{
4881 int all_zero = 1;
4882 int port = BP_PORT(bp);
4883 int vn;
4884
4885 bp->vn_weight_sum = 0;
4886 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4887 int func = 2*vn + port;
4888 u32 vn_cfg =
4889 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4890 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4891 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4892
4893 /* Skip hidden vns */
4894 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4895 continue;
4896
4897 /* If min rate is zero - set it to 1 */
4898 if (!vn_min_rate)
4899 vn_min_rate = DEF_MIN_RATE;
4900 else
4901 all_zero = 0;
4902
4903 bp->vn_weight_sum += vn_min_rate;
4904 }
4905
4906 /* ... only if all min rates are zeros - disable fairness */
4907 if (all_zero)
4908 bp->vn_weight_sum = 0;
4909}
4910
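/* Per-function internal memory init: RSS and MF configuration,
 * storm statistics flags and query addresses, E1H function mode and
 * the per-client CQ page / aggregation size setup */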
4911static void bnx2x_init_internal_func(struct bnx2x *bp)
4912{
4913 struct tstorm_eth_function_common_config tstorm_config = {0};
4914 struct stats_indication_flags stats_flags = {0};
4915 int port = BP_PORT(bp);
4916 int func = BP_FUNC(bp);
4917 int i, j;
4918 u32 offset;
4919 u16 max_agg_size;
4920
4921 if (is_multi(bp)) {
4922 tstorm_config.config_flags = MULTI_FLAGS(bp);
4923 tstorm_config.rss_result_mask = MULTI_MASK;
4924 }
4925 if (IS_E1HMF(bp))
4926 tstorm_config.config_flags |=
4927 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4928
4929 tstorm_config.leading_client_id = BP_L_ID(bp);
4930
4931 REG_WR(bp, BAR_TSTRORM_INTMEM +
4932 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4933 (*(u32 *)&tstorm_config));
4934
4935 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4936 bnx2x_set_storm_rx_mode(bp);
4937
4938 for_each_queue(bp, i) {
4939 u8 cl_id = bp->fp[i].cl_id;
4940
4941 /* reset xstorm per client statistics */
4942 offset = BAR_XSTRORM_INTMEM +
4943 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4944 for (j = 0;
4945 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4946 REG_WR(bp, offset + j*4, 0);
4947
4948 /* reset tstorm per client statistics */
4949 offset = BAR_TSTRORM_INTMEM +
4950 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4951 for (j = 0;
4952 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4953 REG_WR(bp, offset + j*4, 0);
4954
4955 /* reset ustorm per client statistics */
4956 offset = BAR_USTRORM_INTMEM +
4957 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4958 for (j = 0;
4959 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4960 REG_WR(bp, offset + j*4, 0);
4961 }
4962
4963 /* Init statistics related context */
34f80b04 4964 stats_flags.collect_eth = 1;
a2fbb9ea 4965
66e855f3 4966 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4967 ((u32 *)&stats_flags)[0]);
66e855f3 4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4969 ((u32 *)&stats_flags)[1]);
4970
66e855f3 4971 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4972 ((u32 *)&stats_flags)[0]);
66e855f3 4973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4974 ((u32 *)&stats_flags)[1]);
4975
4976 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4977 ((u32 *)&stats_flags)[0]);
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4979 ((u32 *)&stats_flags)[1]);
4980
66e855f3 4981 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4982 ((u32 *)&stats_flags)[0]);
66e855f3 4983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4984 ((u32 *)&stats_flags)[1]);
4985
4986 REG_WR(bp, BAR_XSTRORM_INTMEM +
4987 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4988 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4989 REG_WR(bp, BAR_XSTRORM_INTMEM +
4990 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4991 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4992
4993 REG_WR(bp, BAR_TSTRORM_INTMEM +
4994 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4995 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4996 REG_WR(bp, BAR_TSTRORM_INTMEM +
4997 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4998 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4999
5000 REG_WR(bp, BAR_USTRORM_INTMEM +
5001 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5002 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5003 REG_WR(bp, BAR_USTRORM_INTMEM +
5004 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5005 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5006
5007 if (CHIP_IS_E1H(bp)) {
5008 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5009 IS_E1HMF(bp));
5010 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5011 IS_E1HMF(bp));
5012 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5013 IS_E1HMF(bp));
5014 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5015 IS_E1HMF(bp));
5016
5017 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5018 bp->e1hov);
5019 }
5020
5021 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5022 max_agg_size =
5023 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5024 SGE_PAGE_SIZE * PAGES_PER_SGE),
5025 (u32)0xffff);
555f6c78 5026 for_each_rx_queue(bp, i) {
7a9b2557 5027 struct bnx2x_fastpath *fp = &bp->fp[i];
5028
5029 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5030 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5031 U64_LO(fp->rx_comp_mapping));
5032 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5033 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5034 U64_HI(fp->rx_comp_mapping));
5035
7a9b2557 5036 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5037 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5038 max_agg_size);
5039 }
8a1c38d1 5040
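/* rx_pause below holds the BD/CQE/SGE watermarks handed to the ustorm
 * FW for dropless flow control; the SGE thresholds are left at zero and
 * only raised for queues that actually use TPA (a reading of this code,
 * not a documented FW interface).
 */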
5041 /* dropless flow control */
5042 if (CHIP_IS_E1H(bp)) {
5043 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5044
5045 rx_pause.bd_thr_low = 250;
5046 rx_pause.cqe_thr_low = 250;
5047 rx_pause.cos = 1;
5048 rx_pause.sge_thr_low = 0;
5049 rx_pause.bd_thr_high = 350;
5050 rx_pause.cqe_thr_high = 350;
5051 rx_pause.sge_thr_high = 0;
5052
5053 for_each_rx_queue(bp, i) {
5054 struct bnx2x_fastpath *fp = &bp->fp[i];
5055
5056 if (!fp->disable_tpa) {
5057 rx_pause.sge_thr_low = 150;
5058 rx_pause.sge_thr_high = 250;
5059 }
5060
5061
5062 offset = BAR_USTRORM_INTMEM +
5063 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5064 fp->cl_id);
5065 for (j = 0;
5066 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5067 j++)
5068 REG_WR(bp, offset + j*4,
5069 ((u32 *)&rx_pause)[j]);
5070 }
5071 }
5072
5073 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5074
5075 /* Init rate shaping and fairness contexts */
5076 if (IS_E1HMF(bp)) {
5077 int vn;
5078
5079 /* During init there is no active link;
5080 until the link is up, set the link rate to 10Gbps */
5081 bp->link_vars.line_speed = SPEED_10000;
5082 bnx2x_init_port_minmax(bp);
5083
5084 bnx2x_calc_vn_weight_sum(bp);
5085
5086 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5087 bnx2x_init_vn_minmax(bp, 2*vn + port);
5088
5089 /* Enable rate shaping and fairness */
5090 bp->cmng.flags.cmng_enables =
5091 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5092 if (bp->vn_weight_sum)
5093 bp->cmng.flags.cmng_enables |=
5094 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5095 else
5096 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5097 " fairness will be disabled\n");
5098 } else {
5099 /* rate shaping and fairness are disabled */
5100 DP(NETIF_MSG_IFUP,
5101 "single function mode minmax will be disabled\n");
5102 }
5103
5104
5105 /* Store it to internal memory */
5106 if (bp->port.pmf)
5107 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5108 REG_WR(bp, BAR_XSTRORM_INTMEM +
5109 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5110 ((u32 *)(&bp->cmng))[i]);
5111}
5112
5113static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5114{
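	/* load_code tells this function how much of the chip this driver
	 * instance owns: COMMON (first function loaded on the chip) must
	 * also run the port and function init, PORT must also run the
	 * function init - hence the deliberate fall-throughs below.
	 */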
5115 switch (load_code) {
5116 case FW_MSG_CODE_DRV_LOAD_COMMON:
5117 bnx2x_init_internal_common(bp);
5118 /* no break */
5119
5120 case FW_MSG_CODE_DRV_LOAD_PORT:
5121 bnx2x_init_internal_port(bp);
5122 /* no break */
5123
5124 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5125 bnx2x_init_internal_func(bp);
5126 break;
5127
5128 default:
5129 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5130 break;
5131 }
5132}
5133
5134static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5135{
5136 int i;
5137
5138 for_each_queue(bp, i) {
5139 struct bnx2x_fastpath *fp = &bp->fp[i];
5140
34f80b04 5141 fp->bp = bp;
a2fbb9ea 5142 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5143 fp->index = i;
5144 fp->cl_id = BP_L_ID(bp) + i;
5145 fp->sb_id = fp->cl_id;
5146 DP(NETIF_MSG_IFUP,
5147 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5148 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5149 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5150 fp->sb_id);
5c862848 5151 bnx2x_update_fpsb_idx(fp);
5152 }
5153
5154 /* ensure status block indices were read */
5155 rmb();
5156
5157
5158 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5159 DEF_SB_ID);
5160 bnx2x_update_dsb_idx(bp);
5161 bnx2x_update_coalesce(bp);
5162 bnx2x_init_rx_rings(bp);
5163 bnx2x_init_tx_ring(bp);
5164 bnx2x_init_sp_ring(bp);
5165 bnx2x_init_context(bp);
471de716 5166 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5167 bnx2x_init_ind_table(bp);
5168 bnx2x_stats_init(bp);
5169
5170 /* At this point, we are ready for interrupts */
5171 atomic_set(&bp->intr_sem, 0);
5172
5173 /* flush all before enabling interrupts */
5174 mb();
5175 mmiowb();
5176
615f8fd9 5177 bnx2x_int_enable(bp);
5178}
5179
5180/* end of nic init */
5181
5182/*
5183 * gzip service functions
5184 */
5185
5186static int bnx2x_gunzip_init(struct bnx2x *bp)
5187{
5188 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5189 &bp->gunzip_mapping);
5190 if (bp->gunzip_buf == NULL)
5191 goto gunzip_nomem1;
5192
5193 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5194 if (bp->strm == NULL)
5195 goto gunzip_nomem2;
5196
5197 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5198 GFP_KERNEL);
5199 if (bp->strm->workspace == NULL)
5200 goto gunzip_nomem3;
5201
5202 return 0;
5203
5204gunzip_nomem3:
5205 kfree(bp->strm);
5206 bp->strm = NULL;
5207
5208gunzip_nomem2:
5209 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5210 bp->gunzip_mapping);
5211 bp->gunzip_buf = NULL;
5212
5213gunzip_nomem1:
5214 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5215 " decompression\n", bp->dev->name);
5216 return -ENOMEM;
5217}
5218
5219static void bnx2x_gunzip_end(struct bnx2x *bp)
5220{
5221 kfree(bp->strm->workspace);
5222
5223 kfree(bp->strm);
5224 bp->strm = NULL;
5225
5226 if (bp->gunzip_buf) {
5227 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5228 bp->gunzip_mapping);
5229 bp->gunzip_buf = NULL;
5230 }
5231}
5232
5233static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5234{
5235 int n, rc;
5236
5237 /* check gzip header */
5238 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5239 return -EINVAL;
5240
5241 n = 10;
5242
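/* Per RFC 1952 a gzip member starts with a 10-byte fixed header; if the
 * FNAME flag (bit 3 of the FLG byte, zbuf[3]) is set, a NUL-terminated
 * original file name follows and must be skipped to reach the deflate
 * stream.
 */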
34f80b04 5243#define FNAME 0x8
5244
5245 if (zbuf[3] & FNAME)
5246 while ((zbuf[n++] != 0) && (n < len));
5247
5248 bp->strm->next_in = zbuf + n;
5249 bp->strm->avail_in = len - n;
5250 bp->strm->next_out = bp->gunzip_buf;
5251 bp->strm->avail_out = FW_BUF_SIZE;
5252
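	/* a negative windowBits tells zlib to expect a raw deflate stream:
	 * the gzip header was stripped by hand above and there is no zlib
	 * (RFC 1950) wrapper to parse */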
5253 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5254 if (rc != Z_OK)
5255 return rc;
5256
5257 rc = zlib_inflate(bp->strm, Z_FINISH);
5258 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5259 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5260 bp->dev->name, bp->strm->msg);
5261
5262 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5263 if (bp->gunzip_outlen & 0x3)
5264 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5265 " gunzip_outlen (%d) not aligned\n",
5266 bp->dev->name, bp->gunzip_outlen);
5267 bp->gunzip_outlen >>= 2;
5268
5269 zlib_inflateEnd(bp->strm);
5270
5271 if (rc == Z_STREAM_END)
5272 return 0;
5273
5274 return rc;
5275}
5276
5277/* nic load/unload */
5278
5279/*
34f80b04 5280 * General service functions
5281 */
5282
5283/* send a NIG loopback debug packet */
5284static void bnx2x_lb_pckt(struct bnx2x *bp)
5285{
a2fbb9ea 5286 u32 wb_write[3];
5287
5288 /* Ethernet source and destination addresses */
5289 wb_write[0] = 0x55555555;
5290 wb_write[1] = 0x55555555;
34f80b04 5291 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5292 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5293
5294 /* NON-IP protocol */
5295 wb_write[0] = 0x09000000;
5296 wb_write[1] = 0x55555555;
34f80b04 5297 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5298 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5299}
5300
5301/* some of the internal memories
5302 * are not directly readable from the driver;
5303 * to test them we send debug packets
5304 */
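/* The test runs in two parts: part 1 holds the parser's CFC search
 * credit at zero, sends one loopback packet and checks that the NIG and
 * PRS counters saw it; part 2 queues ten more packets, restores one
 * credit and verifies that they drain and that the NIG EOP FIFO ends up
 * empty.
 */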
5305static int bnx2x_int_mem_test(struct bnx2x *bp)
5306{
5307 int factor;
5308 int count, i;
5309 u32 val = 0;
5310
ad8d3948 5311 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5312 factor = 120;
5313 else if (CHIP_REV_IS_EMUL(bp))
5314 factor = 200;
5315 else
a2fbb9ea 5316 factor = 1;
5317
5318 DP(NETIF_MSG_HW, "start part1\n");
5319
5320 /* Disable inputs of parser neighbor blocks */
5321 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5322 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5323 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5324 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5325
5326 /* Write 0 to parser credits for CFC search request */
5327 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5328
5329 /* send Ethernet packet */
5330 bnx2x_lb_pckt(bp);
5331
5332 /* TODO: do I need to reset the NIG statistics? */
5333 /* Wait until NIG register shows 1 packet of size 0x10 */
5334 count = 1000 * factor;
5335 while (count) {
34f80b04 5336
5337 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5338 val = *bnx2x_sp(bp, wb_data[0]);
5339 if (val == 0x10)
5340 break;
5341
5342 msleep(10);
5343 count--;
5344 }
5345 if (val != 0x10) {
5346 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5347 return -1;
5348 }
5349
5350 /* Wait until PRS register shows 1 packet */
5351 count = 1000 * factor;
5352 while (count) {
5353 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5354 if (val == 1)
5355 break;
5356
5357 msleep(10);
5358 count--;
5359 }
5360 if (val != 0x1) {
5361 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5362 return -2;
5363 }
5364
5365 /* Reset and init BRB, PRS */
34f80b04 5366 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5367 msleep(50);
34f80b04 5368 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5369 msleep(50);
5370 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5371 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5372
5373 DP(NETIF_MSG_HW, "part2\n");
5374
5375 /* Disable inputs of parser neighbor blocks */
5376 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5377 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5378 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5379 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5380
5381 /* Write 0 to parser credits for CFC search request */
5382 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5383
5384 /* send 10 Ethernet packets */
5385 for (i = 0; i < 10; i++)
5386 bnx2x_lb_pckt(bp);
5387
5388 /* Wait until NIG register shows 10 + 1
5389 packets of size 11*0x10 = 0xb0 */
5390 count = 1000 * factor;
5391 while (count) {
34f80b04 5392
5393 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5394 val = *bnx2x_sp(bp, wb_data[0]);
5395 if (val == 0xb0)
5396 break;
5397
5398 msleep(10);
5399 count--;
5400 }
5401 if (val != 0xb0) {
5402 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5403 return -3;
5404 }
5405
5406 /* Wait until PRS register shows 2 packets */
5407 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5408 if (val != 2)
5409 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5410
5411 /* Write 1 to parser credits for CFC search request */
5412 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5413
5414 /* Wait until PRS register shows 3 packets */
5415 msleep(10 * factor);
5416 /* Wait until NIG register shows 1 packet of size 0x10 */
5417 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5418 if (val != 3)
5419 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5420
5421 /* clear NIG EOP FIFO */
5422 for (i = 0; i < 11; i++)
5423 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5424 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5425 if (val != 1) {
5426 BNX2X_ERR("clear of NIG failed\n");
5427 return -4;
5428 }
5429
5430 /* Reset and init BRB, PRS, NIG */
5431 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5432 msleep(50);
5433 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5434 msleep(50);
5435 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5436 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5437#ifndef BCM_ISCSI
5438 /* set NIC mode */
5439 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5440#endif
5441
5442 /* Enable inputs of parser neighbor blocks */
5443 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5444 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5445 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5446 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5447
5448 DP(NETIF_MSG_HW, "done\n");
5449
5450 return 0; /* OK */
5451}
5452
5453static void enable_blocks_attention(struct bnx2x *bp)
5454{
5455 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5456 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5457 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5458 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5459 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5460 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5461 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5462 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5463 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5464/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5465/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5466 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5467 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5468 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5469/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5470/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5471 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5472 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5473 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5474 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5475/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5476/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5477 if (CHIP_REV_IS_FPGA(bp))
5478 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5479 else
5480 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5481 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5482 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5483 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5484/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5485/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5486 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5487 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5488/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5489 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5490}
5491
34f80b04 5492
5493static void bnx2x_reset_common(struct bnx2x *bp)
5494{
5495 /* reset_common */
5496 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5497 0xd3ffff7f);
5498 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5499}
5500
34f80b04 5501static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5502{
a2fbb9ea 5503 u32 val, i;
a2fbb9ea 5504
34f80b04 5505 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5506
81f75bbf 5507 bnx2x_reset_common(bp);
5508 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5510
5511 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5512 if (CHIP_IS_E1H(bp))
5513 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5514
5515 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5516 msleep(30);
5517 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5518
5519 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5520 if (CHIP_IS_E1(bp)) {
5521 /* enable HW interrupt from PXP on USDM overflow
5522 bit 16 on INT_MASK_0 */
5523 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5524 }
a2fbb9ea 5525
5526 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5527 bnx2x_init_pxp(bp);
5528
5529#ifdef __BIG_ENDIAN
5530 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5531 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5532 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5533 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5534 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5535 /* make sure this value is 0 */
5536 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5537
5538/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5539 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5540 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5541 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5542 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5543#endif
5544
34f80b04 5545 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5546#ifdef BCM_ISCSI
5547 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5548 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5549 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5550#endif
5551
5552 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5553 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5554
5555 /* let the HW do its magic ... */
5556 msleep(100);
5557 /* finish PXP init */
5558 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5559 if (val != 1) {
5560 BNX2X_ERR("PXP2 CFG failed\n");
5561 return -EBUSY;
5562 }
5563 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5564 if (val != 1) {
5565 BNX2X_ERR("PXP2 RD_INIT failed\n");
5566 return -EBUSY;
5567 }
a2fbb9ea 5568
5569 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5570 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5571
34f80b04 5572 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5573
5574 /* clean the DMAE memory */
5575 bp->dmae_ready = 1;
5576 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5577
5578 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5579 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5580 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5581 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5582
5583 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5584 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5585 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5586 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5587
5588 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5589 /* soft reset pulse */
5590 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5591 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5592
5593#ifdef BCM_ISCSI
34f80b04 5594 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5595#endif
a2fbb9ea 5596
5597 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5598 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5599 if (!CHIP_REV_IS_SLOW(bp)) {
5600 /* enable hw interrupt from doorbell Q */
5601 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5602 }
a2fbb9ea 5603
34f80b04 5604 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5605 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5606 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5607 /* set NIC mode */
5608 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5609 if (CHIP_IS_E1H(bp))
5610 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5611
5612 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5613 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5614 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5615 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5616
5617 if (CHIP_IS_E1H(bp)) {
5618 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5619 STORM_INTMEM_SIZE_E1H/2);
5620 bnx2x_init_fill(bp,
5621 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5622 0, STORM_INTMEM_SIZE_E1H/2);
5623 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5624 STORM_INTMEM_SIZE_E1H/2);
5625 bnx2x_init_fill(bp,
5626 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5627 0, STORM_INTMEM_SIZE_E1H/2);
5628 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5629 STORM_INTMEM_SIZE_E1H/2);
5630 bnx2x_init_fill(bp,
5631 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5632 0, STORM_INTMEM_SIZE_E1H/2);
5633 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5634 STORM_INTMEM_SIZE_E1H/2);
5635 bnx2x_init_fill(bp,
5636 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5637 0, STORM_INTMEM_SIZE_E1H/2);
5638 } else { /* E1 */
5639 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5640 STORM_INTMEM_SIZE_E1);
5641 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5642 STORM_INTMEM_SIZE_E1);
5643 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5644 STORM_INTMEM_SIZE_E1);
5645 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5646 STORM_INTMEM_SIZE_E1);
34f80b04 5647 }
a2fbb9ea 5648
5649 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5650 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5651 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5652 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5653
5654 /* sync semi rtc */
5655 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5656 0x80000000);
5657 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5658 0x80000000);
a2fbb9ea 5659
5660 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5661 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5662 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5663
5664 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5665 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5666 REG_WR(bp, i, 0xc0cac01a);
5667 /* TODO: replace with something meaningful */
5668 }
8d9c5f34 5669 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5670 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5671
5672 if (sizeof(union cdu_context) != 1024)
5673 /* we currently assume that a context is 1024 bytes */
5674 printk(KERN_ALERT PFX "please adjust the size of"
5675 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5676
5677 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5678 val = (4 << 24) + (0 << 12) + 1024;
5679 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5680 if (CHIP_IS_E1(bp)) {
5681 /* !!! fix pxp client credit until excel update */
5682 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5683 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5684 }
a2fbb9ea 5685
5686 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5687 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5688 /* enable context validation interrupt from CFC */
5689 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5690
5691 /* set the thresholds to prevent CFC/CDU race */
5692 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5693
5694 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5695 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5696
5697 /* PXPCS COMMON comes here */
5698 /* Reset PCIE errors for debug */
5699 REG_WR(bp, 0x2814, 0xffffffff);
5700 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5701
5702 /* EMAC0 COMMON comes here */
5703 /* EMAC1 COMMON comes here */
5704 /* DBU COMMON comes here */
5705 /* DBG COMMON comes here */
5706
5707 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5708 if (CHIP_IS_E1H(bp)) {
5709 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5710 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5711 }
5712
5713 if (CHIP_REV_IS_SLOW(bp))
5714 msleep(200);
5715
5716 /* finish CFC init */
5717 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5718 if (val != 1) {
5719 BNX2X_ERR("CFC LL_INIT failed\n");
5720 return -EBUSY;
5721 }
5722 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5723 if (val != 1) {
5724 BNX2X_ERR("CFC AC_INIT failed\n");
5725 return -EBUSY;
5726 }
5727 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5728 if (val != 1) {
5729 BNX2X_ERR("CFC CAM_INIT failed\n");
5730 return -EBUSY;
5731 }
5732 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5733
34f80b04
EG
5734 /* read NIG statistic
5735 to see if this is our first up since powerup */
5736 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5737 val = *bnx2x_sp(bp, wb_data[0]);
5738
5739 /* do internal memory self test */
5740 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5741 BNX2X_ERR("internal mem self test failed\n");
5742 return -EBUSY;
5743 }
5744
35b19ba5 5745 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5748 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5749 bp->port.need_hw_lock = 1;
5750 break;
5751
35b19ba5 5752 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5753 /* Fan failure is indicated by SPIO 5 */
5754 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5755 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5756
5757 /* set to active low mode */
5758 val = REG_RD(bp, MISC_REG_SPIO_INT);
5759 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5760 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5761 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5762
5763 /* enable interrupt to signal the IGU */
5764 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5765 val |= (1 << MISC_REGISTERS_SPIO_5);
5766 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5767 break;
f1410647 5768
5769 default:
5770 break;
5771 }
f1410647 5772
5773 /* clear PXP2 attentions */
5774 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5775
34f80b04 5776 enable_blocks_attention(bp);
a2fbb9ea 5777
5778 if (!BP_NOMCP(bp)) {
5779 bnx2x_acquire_phy_lock(bp);
5780 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5781 bnx2x_release_phy_lock(bp);
5782 } else
5783 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5784
5785 return 0;
5786}
a2fbb9ea 5787
5788static int bnx2x_init_port(struct bnx2x *bp)
5789{
5790 int port = BP_PORT(bp);
1c06328c 5791 u32 low, high;
34f80b04 5792 u32 val;
a2fbb9ea 5793
5794 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5795
5796 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5797
5798 /* Port PXP comes here */
5799 /* Port PXP2 comes here */
5800#ifdef BCM_ISCSI
5801 /* Port0 1
5802 * Port1 385 */
5803 i++;
5804 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5805 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5808
5809 /* Port0 2
5810 * Port1 386 */
5811 i++;
5812 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5813 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5814 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5816
5817 /* Port0 3
5818 * Port1 387 */
5819 i++;
5820 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5821 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5822 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5823 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5824#endif
34f80b04 5825 /* Port CMs come here */
5826 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5827 (port ? XCM_PORT1_END : XCM_PORT0_END));
5828
5829 /* Port QM comes here */
5830#ifdef BCM_ISCSI
5831 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5832 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5833
5834 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5835 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5836#endif
5837 /* Port DQ comes here */
5838
5839 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5840 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5841 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5842 /* no pause for emulation and FPGA */
5843 low = 0;
5844 high = 513;
5845 } else {
5846 if (IS_E1HMF(bp))
5847 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5848 else if (bp->dev->mtu > 4096) {
5849 if (bp->flags & ONE_PORT_FLAG)
5850 low = 160;
5851 else {
5852 val = bp->dev->mtu;
5853 /* (24*1024 + val*4)/256 */
5854 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5855 }
5856 } else
5857 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5858 high = low + 56; /* 14*1024/256 */
5859 }
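	/* Illustrative arithmetic: a two-port, single-function board with
	 * mtu 9000 takes the val-based branch above, giving low = 96 +
	 * 9000/64 + 1 = 237 and high = 237 + 56 = 293, in the 256-byte BRB
	 * blocks implied by the (24*1024 + val*4)/256 comment.
	 */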
5860 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5861 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5862
5863
ad8d3948 5864 /* Port PRS comes here */
5865 /* Port TSDM comes here */
5866 /* Port CSDM comes here */
5867 /* Port USDM comes here */
5868 /* Port XSDM comes here */
356e2385 5869
5870 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5871 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5872 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5873 port ? USEM_PORT1_END : USEM_PORT0_END);
5874 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5875 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5876 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5877 port ? XSEM_PORT1_END : XSEM_PORT0_END);
356e2385 5878
a2fbb9ea 5879 /* Port UPB comes here */
5880 /* Port XPB comes here */
5881
5882 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5883 port ? PBF_PORT1_END : PBF_PORT0_END);
5884
5885 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5886 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5887
5888 /* update threshold */
34f80b04 5889 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5890 /* update init credit */
34f80b04 5891 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5892
5893 /* probe changes */
34f80b04 5894 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5895 msleep(5);
34f80b04 5896 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5897
5898#ifdef BCM_ISCSI
5899 /* tell the searcher where the T2 table is */
5900 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5901
5902 wb_write[0] = U64_LO(bp->t2_mapping);
5903 wb_write[1] = U64_HI(bp->t2_mapping);
5904 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5905 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5906 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5907 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5908
5909 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5910 /* Port SRCH comes here */
5911#endif
5912 /* Port CDU comes here */
5913 /* Port CFC comes here */
5914
5915 if (CHIP_IS_E1(bp)) {
5916 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5917 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5918 }
5919 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5920 port ? HC_PORT1_END : HC_PORT0_END);
5921
5922 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5923 MISC_AEU_PORT0_START,
5924 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5925 /* init aeu_mask_attn_func_0/1:
5926 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5927 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5928 * bits 4-7 are used for "per vn group attention" */
5929 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5930 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5931
5932 /* Port PXPCS comes here */
5933 /* Port EMAC0 comes here */
5934 /* Port EMAC1 comes here */
5935 /* Port DBU comes here */
5936 /* Port DBG comes here */
356e2385 5937
5938 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5939 port ? NIG_PORT1_END : NIG_PORT0_END);
5940
5941 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5942
5943 if (CHIP_IS_E1H(bp)) {
5944 /* 0x2 disable e1hov, 0x1 enable */
5945 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5946 (IS_E1HMF(bp) ? 0x1 : 0x2));
5947
5948 /* support pause requests from USDM, TSDM and BRB */
5949 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5950
5951 {
5952 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5953 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5954 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5955 }
5956 }
5957
5958 /* Port MCP comes here */
5959 /* Port DMAE comes here */
5960
35b19ba5 5961 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5962 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5963 {
5964 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5965
5966 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5967 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5968
5969 /* The GPIO should be swapped if the swap register is
5970 set and active */
5971 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5972 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5973
5974 /* Select function upon port-swap configuration */
5975 if (port == 0) {
5976 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5977 aeu_gpio_mask = (swap_val && swap_override) ?
5978 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5979 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5980 } else {
5981 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5982 aeu_gpio_mask = (swap_val && swap_override) ?
5983 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5984 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5985 }
5986 val = REG_RD(bp, offset);
5987 /* add GPIO3 to group */
5988 val |= aeu_gpio_mask;
5989 REG_WR(bp, offset, val);
5990 }
5991 break;
5992
35b19ba5 5993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5994 /* add SPIO 5 to group 0 */
5995 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5996 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5997 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5998 break;
5999
6000 default:
6001 break;
6002 }
6003
c18487ee 6004 bnx2x__link_reset(bp);
a2fbb9ea 6005
6006 return 0;
6007}
6008
6009#define ILT_PER_FUNC (768/2)
6010#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6011/* the phys address is shifted right 12 bits and a valid bit (1)
6012 is set in the 53rd bit of the result;
6013 since this is a wide register(TM)
6014 we split it into two 32-bit writes
6015 */
6016#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6017#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6018#define PXP_ONE_ILT(x) (((x) << 10) | x)
6019#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
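/* For example (hypothetical address): a DMA address of 0x123456000
 * yields ONCHIP_ADDR1 = 0x00123456 (bits 12..43) and ONCHIP_ADDR2 =
 * 0x00100000 (bits 44 and up are zero here, plus the valid bit at
 * position 20 of the high word).
 */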
6020
6021#define CNIC_ILT_LINES 0
6022
6023static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6024{
6025 int reg;
6026
6027 if (CHIP_IS_E1H(bp))
6028 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6029 else /* E1 */
6030 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6031
6032 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6033}
6034
6035static int bnx2x_init_func(struct bnx2x *bp)
6036{
6037 int port = BP_PORT(bp);
6038 int func = BP_FUNC(bp);
8badd27a 6039 u32 addr, val;
6040 int i;
6041
6042 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6043
6044 /* set MSI reconfigure capability */
6045 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6046 val = REG_RD(bp, addr);
6047 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6048 REG_WR(bp, addr, val);
6049
6050 i = FUNC_ILT_BASE(func);
6051
6052 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6053 if (CHIP_IS_E1H(bp)) {
6054 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6055 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6056 } else /* E1 */
6057 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6058 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6059
6060
6061 if (CHIP_IS_E1H(bp)) {
6062 for (i = 0; i < 9; i++)
6063 bnx2x_init_block(bp,
6064 cm_start[func][i], cm_end[func][i]);
6065
6066 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6067 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6068 }
6069
6070 /* HC init per function */
6071 if (CHIP_IS_E1H(bp)) {
6072 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6073
6074 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6075 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6076 }
6077 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6078
c14423fe 6079 /* Reset PCIE errors for debug */
6080 REG_WR(bp, 0x2114, 0xffffffff);
6081 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6082
6083 return 0;
6084}
6085
6086static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6087{
6088 int i, rc = 0;
a2fbb9ea 6089
6090 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6091 BP_FUNC(bp), load_code);
a2fbb9ea 6092
6093 bp->dmae_ready = 0;
6094 mutex_init(&bp->dmae_mutex);
6095 bnx2x_gunzip_init(bp);
a2fbb9ea 6096
6097 switch (load_code) {
6098 case FW_MSG_CODE_DRV_LOAD_COMMON:
6099 rc = bnx2x_init_common(bp);
6100 if (rc)
6101 goto init_hw_err;
6102 /* no break */
6103
6104 case FW_MSG_CODE_DRV_LOAD_PORT:
6105 bp->dmae_ready = 1;
6106 rc = bnx2x_init_port(bp);
6107 if (rc)
6108 goto init_hw_err;
6109 /* no break */
6110
6111 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6112 bp->dmae_ready = 1;
6113 rc = bnx2x_init_func(bp);
6114 if (rc)
6115 goto init_hw_err;
6116 break;
6117
6118 default:
6119 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6120 break;
6121 }
6122
6123 if (!BP_NOMCP(bp)) {
6124 int func = BP_FUNC(bp);
6125
6126 bp->fw_drv_pulse_wr_seq =
34f80b04 6127 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6128 DRV_PULSE_SEQ_MASK);
6129 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6130 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6131 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6132 } else
6133 bp->func_stx = 0;
a2fbb9ea 6134
6135 /* this needs to be done before gunzip end */
6136 bnx2x_zero_def_sb(bp);
6137 for_each_queue(bp, i)
6138 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6139
6140init_hw_err:
6141 bnx2x_gunzip_end(bp);
6142
6143 return rc;
6144}
6145
c14423fe 6146/* send the MCP a request, block until there is a reply */
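/* The handshake runs over two shmem mailboxes: the driver writes
 * (command | seq) into drv_mb_header and then polls fw_mb_header until
 * the FW echoes the same sequence number in the low bits, at which
 * point the upper bits carry the FW reply code.
 */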
6147static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6148{
34f80b04 6149 int func = BP_FUNC(bp);
6150 u32 seq = ++bp->fw_seq;
6151 u32 rc = 0;
6152 u32 cnt = 1;
6153 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6154
34f80b04 6155 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6156 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6157
6158 do {
6159 /* let the FW do its magic ... */
6160 msleep(delay);
a2fbb9ea 6161
19680c48 6162 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6163
6164 /* Give the FW up to 2 seconds (200*10ms) */
6165 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6166
6167 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6168 cnt*delay, rc, seq);
6169
6170 /* is this a reply to our command? */
6171 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6172 rc &= FW_MSG_CODE_MASK;
f1410647 6173
6174 } else {
6175 /* FW BUG! */
6176 BNX2X_ERR("FW failed to respond!\n");
6177 bnx2x_fw_dump(bp);
6178 rc = 0;
6179 }
f1410647 6180
6181 return rc;
6182}
6183
6184static void bnx2x_free_mem(struct bnx2x *bp)
6185{
6186
6187#define BNX2X_PCI_FREE(x, y, size) \
6188 do { \
6189 if (x) { \
6190 pci_free_consistent(bp->pdev, size, x, y); \
6191 x = NULL; \
6192 y = 0; \
6193 } \
6194 } while (0)
6195
6196#define BNX2X_FREE(x) \
6197 do { \
6198 if (x) { \
6199 vfree(x); \
6200 x = NULL; \
6201 } \
6202 } while (0)
6203
6204 int i;
6205
6206 /* fastpath */
555f6c78 6207 /* Common */
6208 for_each_queue(bp, i) {
6209
555f6c78 6210 /* status blocks */
6211 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6212 bnx2x_fp(bp, i, status_blk_mapping),
6213 sizeof(struct host_status_block) +
6214 sizeof(struct eth_tx_db_data));
6215 }
6216 /* Rx */
6217 for_each_rx_queue(bp, i) {
a2fbb9ea 6218
555f6c78 6219 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6220 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6221 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6222 bnx2x_fp(bp, i, rx_desc_mapping),
6223 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6224
6225 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6226 bnx2x_fp(bp, i, rx_comp_mapping),
6227 sizeof(struct eth_fast_path_rx_cqe) *
6228 NUM_RCQ_BD);
a2fbb9ea 6229
7a9b2557 6230 /* SGE ring */
32626230 6231 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6232 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6233 bnx2x_fp(bp, i, rx_sge_mapping),
6234 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6235 }
6236 /* Tx */
6237 for_each_tx_queue(bp, i) {
6238
6239 /* fastpath tx rings: tx_buf tx_desc */
6240 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6241 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6242 bnx2x_fp(bp, i, tx_desc_mapping),
6243 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6244 }
6245 /* end of fastpath */
6246
6247 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6248 sizeof(struct host_def_status_block));
6249
6250 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6251 sizeof(struct bnx2x_slowpath));
6252
6253#ifdef BCM_ISCSI
6254 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6255 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6256 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6257 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6258#endif
7a9b2557 6259 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6260
6261#undef BNX2X_PCI_FREE
6262#undef BNX2X_FREE
6263}
6264
6265static int bnx2x_alloc_mem(struct bnx2x *bp)
6266{
6267
6268#define BNX2X_PCI_ALLOC(x, y, size) \
6269 do { \
6270 x = pci_alloc_consistent(bp->pdev, size, y); \
6271 if (x == NULL) \
6272 goto alloc_mem_err; \
6273 memset(x, 0, size); \
6274 } while (0)
6275
6276#define BNX2X_ALLOC(x, size) \
6277 do { \
6278 x = vmalloc(size); \
6279 if (x == NULL) \
6280 goto alloc_mem_err; \
6281 memset(x, 0, size); \
6282 } while (0)
6283
6284 int i;
6285
6286 /* fastpath */
555f6c78 6287 /* Common */
6288 for_each_queue(bp, i) {
6289 bnx2x_fp(bp, i, bp) = bp;
6290
555f6c78 6291 /* status blocks */
6292 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6293 &bnx2x_fp(bp, i, status_blk_mapping),
6294 sizeof(struct host_status_block) +
6295 sizeof(struct eth_tx_db_data));
6296 }
6297 /* Rx */
6298 for_each_rx_queue(bp, i) {
a2fbb9ea 6299
555f6c78 6300 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6301 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6302 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6303 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6304 &bnx2x_fp(bp, i, rx_desc_mapping),
6305 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6306
6307 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6308 &bnx2x_fp(bp, i, rx_comp_mapping),
6309 sizeof(struct eth_fast_path_rx_cqe) *
6310 NUM_RCQ_BD);
6311
6312 /* SGE ring */
6313 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6314 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6315 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6316 &bnx2x_fp(bp, i, rx_sge_mapping),
6317 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6318 }
6319 /* Tx */
6320 for_each_tx_queue(bp, i) {
6321
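		/* the HW tx producer block is not a separate allocation: it
		 * lives right after the host_status_block inside the status
		 * block DMA buffer allocated above (hence the extra
		 * sizeof(struct eth_tx_db_data) in that allocation) */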
6322 bnx2x_fp(bp, i, hw_tx_prods) =
6323 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6324
6325 bnx2x_fp(bp, i, tx_prods_mapping) =
6326 bnx2x_fp(bp, i, status_blk_mapping) +
6327 sizeof(struct host_status_block);
6328
6329 /* fastpath tx rings: tx_buf tx_desc */
6330 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6331 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6332 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6333 &bnx2x_fp(bp, i, tx_desc_mapping),
6334 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6335 }
6336 /* end of fastpath */
6337
6338 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6339 sizeof(struct host_def_status_block));
6340
6341 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6342 sizeof(struct bnx2x_slowpath));
6343
6344#ifdef BCM_ISCSI
6345 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6346
6347 /* Initialize T1 */
6348 for (i = 0; i < 64*1024; i += 64) {
6349 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6350 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6351 }
6352
6353 /* allocate searcher T2 table
6354 we allocate 1/4 of alloc num for T2
6355 (which is not entered into the ILT) */
6356 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6357
6358 /* Initialize T2 */
6359 for (i = 0; i < 16*1024; i += 64)
6360 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6361
c14423fe 6362 /* now fixup the last line in the block to point to the next block */
6363 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6364
6365 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6366 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6367
6368 /* QM queues (128*MAX_CONN) */
6369 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6370#endif
6371
6372 /* Slow path ring */
6373 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6374
6375 return 0;
6376
6377alloc_mem_err:
6378 bnx2x_free_mem(bp);
6379 return -ENOMEM;
6380
6381#undef BNX2X_PCI_ALLOC
6382#undef BNX2X_ALLOC
6383}
6384
6385static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6386{
6387 int i;
6388
555f6c78 6389 for_each_tx_queue(bp, i) {
6390 struct bnx2x_fastpath *fp = &bp->fp[i];
6391
6392 u16 bd_cons = fp->tx_bd_cons;
6393 u16 sw_prod = fp->tx_pkt_prod;
6394 u16 sw_cons = fp->tx_pkt_cons;
6395
6396 while (sw_cons != sw_prod) {
6397 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6398 sw_cons++;
6399 }
6400 }
6401}
6402
6403static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6404{
6405 int i, j;
6406
555f6c78 6407 for_each_rx_queue(bp, j) {
6408 struct bnx2x_fastpath *fp = &bp->fp[j];
6409
6410 for (i = 0; i < NUM_RX_BD; i++) {
6411 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6412 struct sk_buff *skb = rx_buf->skb;
6413
6414 if (skb == NULL)
6415 continue;
6416
6417 pci_unmap_single(bp->pdev,
6418 pci_unmap_addr(rx_buf, mapping),
356e2385 6419 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6420
6421 rx_buf->skb = NULL;
6422 dev_kfree_skb(skb);
6423 }
7a9b2557 6424 if (!fp->disable_tpa)
6425 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6426 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6427 ETH_MAX_AGGREGATION_QUEUES_E1H);
6428 }
6429}
6430
6431static void bnx2x_free_skbs(struct bnx2x *bp)
6432{
6433 bnx2x_free_tx_skbs(bp);
6434 bnx2x_free_rx_skbs(bp);
6435}
6436
6437static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6438{
34f80b04 6439 int i, offset = 1;
6440
6441 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6442 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6443 bp->msix_table[0].vector);
6444
6445 for_each_queue(bp, i) {
c14423fe 6446 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6447 "state %x\n", i, bp->msix_table[i + offset].vector,
6448 bnx2x_fp(bp, i, state));
6449
34f80b04 6450 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6451 }
6452}
6453
6454static void bnx2x_free_irq(struct bnx2x *bp)
6455{
a2fbb9ea 6456 if (bp->flags & USING_MSIX_FLAG) {
6457 bnx2x_free_msix_irqs(bp);
6458 pci_disable_msix(bp->pdev);
6459 bp->flags &= ~USING_MSIX_FLAG;
6460
6461 } else if (bp->flags & USING_MSI_FLAG) {
6462 free_irq(bp->pdev->irq, bp->dev);
6463 pci_disable_msi(bp->pdev);
6464 bp->flags &= ~USING_MSI_FLAG;
6465
6466 } else
6467 free_irq(bp->pdev->irq, bp->dev);
6468}
6469
6470static int bnx2x_enable_msix(struct bnx2x *bp)
6471{
6472 int i, rc, offset = 1;
6473 int igu_vec = 0;
a2fbb9ea 6474
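	/* MSI-X table layout: entry 0 is the slowpath (default status
	 * block) vector; entries 1..BNX2X_NUM_QUEUES map fastpath queue i
	 * to IGU vector BP_L_ID() + 1 + i */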
6475 bp->msix_table[0].entry = igu_vec;
6476 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6477
34f80b04 6478 for_each_queue(bp, i) {
8badd27a 6479 igu_vec = BP_L_ID(bp) + offset + i;
6480 bp->msix_table[i + offset].entry = igu_vec;
6481 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6482 "(fastpath #%u)\n", i + offset, igu_vec, i);
6483 }
6484
34f80b04 6485 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6486 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6487 if (rc) {
6488 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6489 return rc;
34f80b04 6490 }
8badd27a 6491
6492 bp->flags |= USING_MSIX_FLAG;
6493
6494 return 0;
6495}
6496
6497static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6498{
34f80b04 6499 int i, rc, offset = 1;
a2fbb9ea 6500
6501 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6502 bp->dev->name, bp->dev);
6503 if (rc) {
6504 BNX2X_ERR("request sp irq failed\n");
6505 return -EBUSY;
6506 }
6507
6508 for_each_queue(bp, i) {
6509 struct bnx2x_fastpath *fp = &bp->fp[i];
6510
6511 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6512 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6513 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6514 if (rc) {
555f6c78 6515 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6516 bnx2x_free_msix_irqs(bp);
6517 return -EBUSY;
6518 }
6519
555f6c78 6520 fp->state = BNX2X_FP_STATE_IRQ;
6521 }
6522
6523 i = BNX2X_NUM_QUEUES(bp);
6524 if (is_multi(bp))
6525 printk(KERN_INFO PFX
6526 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6527 bp->dev->name, bp->msix_table[0].vector,
6528 bp->msix_table[offset].vector,
6529 bp->msix_table[offset + i - 1].vector);
6530 else
6531 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6532 bp->dev->name, bp->msix_table[0].vector,
6533 bp->msix_table[offset + i - 1].vector);
6534
a2fbb9ea 6535 return 0;
6536}
6537
6538static int bnx2x_enable_msi(struct bnx2x *bp)
6539{
6540 int rc;
6541
6542 rc = pci_enable_msi(bp->pdev);
6543 if (rc) {
6544 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6545 return -1;
6546 }
6547 bp->flags |= USING_MSI_FLAG;
6548
6549 return 0;
6550}
6551
6552static int bnx2x_req_irq(struct bnx2x *bp)
6553{
8badd27a 6554 unsigned long flags;
34f80b04 6555 int rc;
a2fbb9ea 6556
6557 if (bp->flags & USING_MSI_FLAG)
6558 flags = 0;
6559 else
6560 flags = IRQF_SHARED;
6561
6562 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6563 bp->dev->name, bp->dev);
6564 if (!rc)
6565 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6566
6567 return rc;
6568}
6569
6570static void bnx2x_napi_enable(struct bnx2x *bp)
6571{
6572 int i;
6573
555f6c78 6574 for_each_rx_queue(bp, i)
6575 napi_enable(&bnx2x_fp(bp, i, napi));
6576}
6577
6578static void bnx2x_napi_disable(struct bnx2x *bp)
6579{
6580 int i;
6581
555f6c78 6582 for_each_rx_queue(bp, i)
6583 napi_disable(&bnx2x_fp(bp, i, napi));
6584}
6585
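/* bnx2x_netif_start/_stop bracket HW access through bp->intr_sem:
 * _start re-enables NAPI and interrupts only when its
 * atomic_dec_and_test() brings the count back to zero, i.e. once every
 * outstanding disable has been balanced.
 */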
6586static void bnx2x_netif_start(struct bnx2x *bp)
6587{
6588 if (atomic_dec_and_test(&bp->intr_sem)) {
6589 if (netif_running(bp->dev)) {
6590 bnx2x_napi_enable(bp);
6591 bnx2x_int_enable(bp);
6592 if (bp->state == BNX2X_STATE_OPEN)
6593 netif_tx_wake_all_queues(bp->dev);
6594 }
6595 }
6596}
6597
f8ef6e44 6598static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6599{
f8ef6e44 6600 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6601 bnx2x_napi_disable(bp);
65abd74d 6602 if (netif_running(bp->dev)) {
6603 netif_tx_disable(bp->dev);
6604 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6605 }
6606}
6607
6608/*
6609 * Init service functions
6610 */
6611
3101c2bc 6612static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6613{
6614 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6615 int port = BP_PORT(bp);
6616
6617 /* CAM allocation
6618 * unicasts 0-31:port0 32-63:port1
6619 * multicast 64-127:port0 128-191:port1
6620 */
8d9c5f34 6621 config->hdr.length = 2;
af246401 6622 config->hdr.offset = port ? 32 : 0;
0626b899 6623 config->hdr.client_id = bp->fp->cl_id;
6624 config->hdr.reserved1 = 0;
6625
6626 /* primary MAC */
6627 config->config_table[0].cam_entry.msb_mac_addr =
6628 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6629 config->config_table[0].cam_entry.middle_mac_addr =
6630 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6631 config->config_table[0].cam_entry.lsb_mac_addr =
6632 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6633 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6634 if (set)
6635 config->config_table[0].target_table_entry.flags = 0;
6636 else
6637 CAM_INVALIDATE(config->config_table[0]);
6638 config->config_table[0].target_table_entry.client_id = 0;
6639 config->config_table[0].target_table_entry.vlan_id = 0;
6640
6641 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6642 (set ? "setting" : "clearing"),
6643 config->config_table[0].cam_entry.msb_mac_addr,
6644 config->config_table[0].cam_entry.middle_mac_addr,
6645 config->config_table[0].cam_entry.lsb_mac_addr);
6646
6647 /* broadcast */
6648 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6649 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6650 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6651 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6652 if (set)
6653 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6654 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6655 else
6656 CAM_INVALIDATE(config->config_table[1]);
6657 config->config_table[1].target_table_entry.client_id = 0;
6658 config->config_table[1].target_table_entry.vlan_id = 0;
6659
6660 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6661 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6662 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6663}
6664
3101c2bc 6665static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6666{
6667 struct mac_configuration_cmd_e1h *config =
6668 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6669
3101c2bc 6670 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6671 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6672 return;
6673 }
6674
6675 /* CAM allocation for E1H
6676 * unicasts: by func number
6677 * multicast: 20+FUNC*20, 20 each
6678 */
8d9c5f34 6679 config->hdr.length = 1;
34f80b04 6680 config->hdr.offset = BP_FUNC(bp);
0626b899 6681 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6682 config->hdr.reserved1 = 0;
6683
6684 /* primary MAC */
6685 config->config_table[0].msb_mac_addr =
6686 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6687 config->config_table[0].middle_mac_addr =
6688 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6689 config->config_table[0].lsb_mac_addr =
6690 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6691 config->config_table[0].client_id = BP_L_ID(bp);
6692 config->config_table[0].vlan_id = 0;
6693 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6694 if (set)
6695 config->config_table[0].flags = BP_PORT(bp);
6696 else
6697 config->config_table[0].flags =
6698 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6699
3101c2bc
YG
6700 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6701 (set ? "setting" : "clearing"),
34f80b04
EG
6702 config->config_table[0].msb_mac_addr,
6703 config->config_table[0].middle_mac_addr,
6704 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6705
6706 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6707 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6708 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6709}
6710
a2fbb9ea
ET
6711static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6712 int *state_p, int poll)
6713{
6714 /* can take a while if any port is running */
8b3a0f0b 6715 int cnt = 5000;
a2fbb9ea 6716
c14423fe
ET
6717 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6718 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6719
6720 might_sleep();
34f80b04 6721 while (cnt--) {
a2fbb9ea
ET
6722 if (poll) {
6723 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6724 /* if index is different from 0
6725 * the reply for some commands will
3101c2bc 6726 * be on the non default queue
a2fbb9ea
ET
6727 */
6728 if (idx)
6729 bnx2x_rx_int(&bp->fp[idx], 10);
6730 }
a2fbb9ea 6731
3101c2bc 6732 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6733 if (*state_p == state) {
6734#ifdef BNX2X_STOP_ON_ERROR
6735 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6736#endif
a2fbb9ea 6737 return 0;
8b3a0f0b 6738 }
a2fbb9ea 6739
a2fbb9ea 6740 msleep(1);
a2fbb9ea
ET
6741 }
6742
a2fbb9ea 6743 /* timeout! */
49d66772
ET
6744 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6745 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6746#ifdef BNX2X_STOP_ON_ERROR
6747 bnx2x_panic();
6748#endif
a2fbb9ea 6749
49d66772 6750 return -EBUSY;
a2fbb9ea
ET
6751}
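
/* Editor's note on the usage pattern: every slow-path command in this file
 * pairs a bnx2x_sp_post() with a bnx2x_wait_ramrod() on the state word that
 * bnx2x_sp_event() updates from the completion path, e.g. (taken from
 * bnx2x_stop_multi() below):
 *
 *	fp->state = BNX2X_FP_STATE_HALTING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
 *			       &fp->state, 1);
 *
 * The mb() in the wait loop above is what guarantees the re-read of
 * *state_p observes that update from the other context.
 */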

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
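
/* Editor's note: in MSI-X mode the queue count above is simply the number
 * of online CPUs capped by what the chip supports, so e.g. a 16-CPU host
 * with an 8-queue chip limit requests 8 vectors.  A minimal sketch of that
 * clamp (illustrative only, not part of the driver):
 */
#if 0	/* illustrative sketch only */
static u32 example_num_queues(u32 online_cpus, u32 hw_max_queues)
{
	/* one RSS queue per CPU, capped by the hardware limit */
	return (online_cpus < hw_max_queues) ? online_cpus : hw_max_queues;
}
#endif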

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
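
/* Editor's note on the no-MCP bookkeeping in bnx2x_nic_load() above:
 * load_count[0] counts all loaded functions and load_count[1 + port]
 * counts functions per port, so the first load overall gets LOAD_COMMON,
 * the first load on a port gets LOAD_PORT, and everything else gets
 * LOAD_FUNCTION.  A minimal sketch of that decision (illustrative only,
 * not part of the driver):
 */
#if 0	/* illustrative sketch only */
static u32 example_load_code(int counts[3], int port)
{
	counts[0]++;		/* total functions loaded */
	counts[1 + port]++;	/* functions loaded on this port */
	if (counts[0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (counts[1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
#endif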

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
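
/* Editor's note: a worked example of the WoL MAC-match packing in
 * bnx2x_nic_unload() above.  The six MAC bytes are split big-endian across
 * two 32-bit EMAC registers: bytes 0-1 go in the first word's low 16 bits,
 * bytes 2-5 fill the second word.  Illustrative only, not driver code.
 */
#if 0	/* illustrative sketch only */
static void example_wol_match_words(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}
/* 00:10:18:aa:bb:cc -> hi = 0x00000010, lo = 0x18aabbcc */
#endif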

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
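
/* Editor's note: the chip_id assembled above is one 32-bit word laid out as
 * num[31:16] | rev[15:12] | metal[11:4] | bond_id[3:0].  A minimal sketch
 * of the composition (illustrative only, not part of the driver):
 */
#if 0	/* illustrative sketch only */
static u32 example_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}
#endif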

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
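
/* Editor's note: the shmem MAC above is stored as two 32-bit words,
 * mac_upper holding bytes 0-1 in its low 16 bits and mac_lower holding
 * bytes 2-5, which is why dev_addr[] is filled by shifting val2 and val
 * down in 8-bit steps.  Sketch (illustrative only, not driver code):
 */
#if 0	/* illustrative sketch only */
static void example_shmem_mac(u32 upper, u32 lower, u8 *addr)
{
	addr[0] = (u8)(upper >> 8);
	addr[1] = (u8)upper;
	addr[2] = (u8)(lower >> 24);
	addr[3] = (u8)(lower >> 16);
	addr[4] = (u8)(lower >> 8);
	addr[5] = (u8)lower;
}
#endif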

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
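
/* Editor's note: in E1HMF mode the per-function bandwidth limit read above
 * is stored in mf_config as a count of 100 Mbps units, so e.g. a MAX_BW
 * field of 25 caps the reported speed at 2500 (2.5 Gbps).  A minimal
 * sketch of the conversion (illustrative only, not part of the driver):
 */
#if 0	/* illustrative sketch only */
static u16 example_vn_max_rate(u32 mf_config)
{
	/* MAX_BW field is in units of 100 Mbps */
	return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}
#endif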
8318
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

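/*
 * Editorial sketch (not part of the original driver): how a caller uses
 * bnx2x_nvram_read() -- read the first dword and compare it against the
 * NVRAM magic value, exactly as bnx2x_test_nvram() does further below.
 * The helper name is hypothetical and the block is compiled out.
 */
#if 0 /* example only */
static int bnx2x_example_read_magic(struct bnx2x *bp)
{
	__be32 buf;
	int rc;

	/* offset 0 and length 4 satisfy the dword-alignment checks above */
	rc = bnx2x_nvram_read(bp, 0, (u8 *)&buf, sizeof(buf));
	if (rc)
		return rc;

	/* data is returned big-endian; 0x669955aa is the expected magic */
	return (be32_to_cpu(buf) == 0x669955aa) ? 0 : -ENODEV;
}
#endif
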
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

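/*
 * Editorial note: BYTE_OFFSET() converts a byte address into the bit
 * position of that byte within its containing dword, e.g.
 * BYTE_OFFSET(0) == 0, BYTE_OFFSET(1) == 8, BYTE_OFFSET(3) == 24.
 * bnx2x_nvram_write1() below uses it to mask out and splice a single
 * byte into the dword read back from flash.
 */
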
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

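/*
 * Editorial note: in the loop above, MCPR_NVM_COMMAND_LAST is raised
 * either on the final dword of the buffer or when the next dword would
 * cross an NVRAM_PAGE_SIZE boundary, and MCPR_NVM_COMMAND_FIRST is
 * re-raised at the start of each new page, so every flash page is
 * written as its own FIRST..LAST command sequence.
 */
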
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

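/*
 * Editorial note: each reg_tbl[] entry is an {offset0, offset1, mask}
 * triple; the register actually exercised is offset0 + port * offset1,
 * so a single table covers both ports (stride 4 for simple per-port
 * mirrors, larger strides such as 160 for register arrays).
 */
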
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

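/*
 * Editorial note: 0xdebb20e3 is the standard CRC32 residual -- running
 * ether_crc_le() over a block whose trailing bytes are the stored CRC of
 * the preceding data always yields this constant.  That is why
 * bnx2x_test_nvram() above can validate each region with a single
 * comparison against CRC32_RESIDUAL instead of extracting and comparing
 * the stored checksum itself.
 */
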
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

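/*
 * Editorial note: IS_PORT_STAT() matches entries flagged as port-only,
 * IS_FUNC_STAT() matches entries carrying the per-function flag, and
 * IS_E1HMF_MODE_STAT() is true in E1H multi-function mode unless the
 * BNX2X_MSG_STATS debug level forces the full per-port statistics out;
 * the string/count/stats callbacks below all filter on these.
 */
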
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

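/*
 * Editorial note: 64-bit counters live in the stats structures as
 * {hi, lo} dword pairs, so the 8-byte decode above is
 * HILO_U64(*offset, *(offset + 1)), i.e. ((u64)hi << 32) + lo, while
 * 4-byte counters are simply widened with a cast.
 */
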
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

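/*
 * Editorial note (interpretation, not from the original sources):
 * bnx2x_csum_fix() appears to compensate for a partial checksum that was
 * computed starting 'fix' bytes away from the real transport header --
 * the extra bytes' partial sum is removed with csum_sub() (fix > 0) or
 * the missing bytes' sum is added with csum_add() (fix < 0) -- after
 * which the result is folded to 16 bits and byte-swapped into the order
 * the parsing BD expects.
 */
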
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

632da4d6 10186#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10187/* check if packet requires linearization (packet is too fragmented)
10188 no need to check fragmentation if page size > 8K (there will be no
10189 violation to FW restrictions) */
755735eb
EG
10190static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10191 u32 xmit_type)
10192{
10193 int to_copy = 0;
10194 int hlen = 0;
10195 int first_bd_sz = 0;
10196
10197 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10198 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10199
10200 if (xmit_type & XMIT_GSO) {
10201 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10202 /* Check if LSO packet needs to be copied:
10203 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10204 int wnd_size = MAX_FETCH_BD - 3;
33471629 10205 /* Number of windows to check */
755735eb
EG
10206 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10207 int wnd_idx = 0;
10208 int frag_idx = 0;
10209 u32 wnd_sum = 0;
10210
10211 /* Headers length */
10212 hlen = (int)(skb_transport_header(skb) - skb->data) +
10213 tcp_hdrlen(skb);
10214
10215 /* Amount of data (w/o headers) on linear part of SKB*/
10216 first_bd_sz = skb_headlen(skb) - hlen;
10217
10218 wnd_sum = first_bd_sz;
10219
10220 /* Calculate the first sum - it's special */
10221 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10222 wnd_sum +=
10223 skb_shinfo(skb)->frags[frag_idx].size;
10224
10225 /* If there was data on linear skb data - check it */
10226 if (first_bd_sz > 0) {
10227 if (unlikely(wnd_sum < lso_mss)) {
10228 to_copy = 1;
10229 goto exit_lbl;
10230 }
10231
10232 wnd_sum -= first_bd_sz;
10233 }
10234
10235 /* Others are easier: run through the frag list and
10236 check all windows */
10237 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10238 wnd_sum +=
10239 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10240
10241 if (unlikely(wnd_sum < lso_mss)) {
10242 to_copy = 1;
10243 break;
10244 }
10245 wnd_sum -=
10246 skb_shinfo(skb)->frags[wnd_idx].size;
10247 }
755735eb
EG
10248 } else {
10249 /* in non-LSO too fragmented packet should always
10250 be linearized */
10251 to_copy = 1;
10252 }
10253 }
10254
10255exit_lbl:
10256 if (unlikely(to_copy))
10257 DP(NETIF_MSG_TX_QUEUED,
10258 "Linearization IS REQUIRED for %s packet. "
10259 "num_frags %d hlen %d first_bd_sz %d\n",
10260 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10261 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10262
10263 return to_copy;
10264}
632da4d6 10265#endif
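
/* Worked example of the window check (illustrative numbers only): if
 * the window size (MAX_FETCH_BD - 3) were 10 and an LSO skb carried 16
 * frags, the first window would sum the linear payload plus frags 0..8,
 * and the loop would then slide a 10-frag window across the remaining
 * frags.  Any window totalling fewer bytes than lso_mss means one
 * MSS-sized segment could span more BDs than the FW can fetch at once,
 * so the skb must be linearized.
 */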

/* called with netif_tx_lock;
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions).  There is no need to check fragmentation if the
	   page size is larger than 8K (the FW restrictions cannot be
	   violated then) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully.  First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing-info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
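
	/* BD chain for a typical TSO packet, as described above (layout
	 * reconstructed from this function, shown here for clarity):
	 *
	 *   [start BD: headers] -> [parsing BD] -> [data BD(s)] -> [end BD]
	 *
	 * The first BD carries ETH_TX_BD_FLAGS_START_BD, the last one
	 * ETH_TX_BD_FLAGS_END_BD, and nbd counts all of them, including
	 * the parsing BD when one is used.
	 */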

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now the NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fix up the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since the FW might read the BD right after the producer is updated.
	 * This is only applicable to weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put the Tx queue into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
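
/* Note on the stop/re-check/wake sequence at the end of
 * bnx2x_start_xmit() (the usual lost-wakeup avoidance, as the existing
 * smp_mb() comment suggests): the queue is stopped first and
 * availability is re-checked afterwards; if a concurrent bnx2x_tx_int()
 * freed descriptors between the check and the stop, the queue is woken
 * again immediately instead of stalling until the next Tx completion.
 */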

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
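
/* How the E1H branch above maps a multicast MAC to a filter bit
 * (derived from the code): crc32c_le() runs over the 6-byte address,
 * the top byte of the CRC selects one of 256 bits, regidx = bit >> 5
 * picks the 32-bit MC_HASH register and bit & 0x1f the bit within it.
 * Since several addresses can alias to the same bit, the hash may pass
 * frames that were not subscribed to; exact filtering is left to the
 * stack.
 */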

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set the flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
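
/* The DMA-mask setup above follows the usual PCI pattern: try a 64-bit
 * mask first and record DAC capability in USING_DAC_FLAG (which later
 * enables NETIF_F_HIGHDMA), otherwise fall back to a 32-bit mask and
 * fail the probe only if even that is unsupported.
 */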

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
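
/* Taking the three EEH callbacks together (a summary of the code
 * above): bnx2x_io_error_detected() tears the NIC down and requests a
 * slot reset, bnx2x_io_slot_reset() re-enables the device and restores
 * PCI state, and bnx2x_io_resume() re-reads the MCP shared memory and
 * reloads the NIC, mirroring a cold probe.
 */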

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);